Skip to content

Commit

Permalink
Reconcile MultiClusterService
Browse files Browse the repository at this point in the history
  • Loading branch information
wahabmk committed Oct 11, 2024
1 parent aba826a commit 0739289
Show file tree
Hide file tree
Showing 18 changed files with 438 additions and 168 deletions.
8 changes: 7 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -365,6 +365,8 @@ FLUX_SOURCE_REPO_CRD ?= $(EXTERNAL_CRD_DIR)/source-helmrepositories-$(FLUX_SOURC
FLUX_SOURCE_CHART_CRD ?= $(EXTERNAL_CRD_DIR)/source-helmchart-$(FLUX_SOURCE_VERSION).yaml
FLUX_HELM_VERSION ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/fluxcd/helm-controller/api") | .Version')
FLUX_HELM_CRD ?= $(EXTERNAL_CRD_DIR)/helm-$(FLUX_HELM_VERSION).yaml
SVELTOS_VERSION ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/projectsveltos/libsveltos") | .Version')
SVELTOS_CRD ?= $(EXTERNAL_CRD_DIR)/sveltos-$(SVELTOS_VERSION).yaml

## Tool Binaries
KUBECTL ?= kubectl
Expand Down Expand Up @@ -429,8 +431,12 @@ $(FLUX_SOURCE_REPO_CRD): $(EXTERNAL_CRD_DIR)
rm -f $(FLUX_SOURCE_REPO_CRD)
curl -s https://raw.githubusercontent.com/fluxcd/source-controller/$(FLUX_SOURCE_VERSION)/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml > $(FLUX_SOURCE_REPO_CRD)

# Download the Sveltos CRD manifest pinned to the module version from go.mod.
# --fail makes curl exit non-zero on HTTP errors (e.g. 404 for a bad tag)
# instead of silently writing an HTML error page into the CRD file.
$(SVELTOS_CRD): $(EXTERNAL_CRD_DIR)
	rm -f $(SVELTOS_CRD)
	curl -s --fail https://raw.githubusercontent.com/projectsveltos/sveltos/$(SVELTOS_VERSION)/manifest/crds/sveltos_crds.yaml > $(SVELTOS_CRD)

.PHONY: external-crd
external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD)
external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD) $(SVELTOS_CRD)

.PHONY: kind
kind: $(KIND) ## Download kind locally if necessary.
Expand Down
3 changes: 3 additions & 0 deletions api/v1alpha1/managedcluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,9 @@ type ManagedClusterSpec struct {
Priority int32 `json:"priority,omitempty"`
// DryRun specifies whether the template should be applied after validation or only validated.
DryRun bool `json:"dryRun,omitempty"`

// +kubebuilder:default:=false

// StopOnConflict specifies what to do in case of a conflict.
// E.g. If another object is already managing a service.
// By default the remaining services will be deployed even if conflict is detected.
Expand Down
10 changes: 10 additions & 0 deletions api/v1alpha1/multiclusterservice_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,13 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	// MultiClusterServiceFinalizer is the finalizer applied to MultiClusterService
	// objects so that cleanup (deleting the backing Sveltos ClusterProfile) runs
	// before the object is removed.
	MultiClusterServiceFinalizer = "hmc.mirantis.com/multicluster-service"
	// MultiClusterServiceKind is the Kind string of the MultiClusterService type,
	// used e.g. in owner references.
	MultiClusterServiceKind = "MultiClusterService"
)

// ServiceSpec represents a Service to be managed
type ServiceSpec struct {
// Values is the helm values to be passed to the template.
Expand Down Expand Up @@ -57,6 +64,9 @@ type MultiClusterServiceSpec struct {
// In case of conflict with another object managing the service,
// the one with higher priority will get to deploy its services.
Priority int32 `json:"priority,omitempty"`

// +kubebuilder:default:=false

// StopOnConflict specifies what to do in case of a conflict.
// E.g. If another object is already managing a service.
// By default the remaining services will be deployed even if conflict is detected.
Expand Down
1 change: 1 addition & 0 deletions config/dev/aws-managedcluster.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ spec:
workersNumber: 1
installBeachHeadServices: false
template: aws-standalone-cp-0-0-1
priority: 100
services:
- template: kyverno-3-2-6
name: kyverno
Expand Down
13 changes: 13 additions & 0 deletions config/dev/multiclusterservice.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Dev example: deploy ingress-nginx to every cluster matching the selector.
apiVersion: hmc.mirantis.com/v1alpha1
kind: MultiClusterService
metadata:
  name: global-ingress
spec:
  # In case of conflict with another object managing a service,
  # the object with the higher priority wins.
  priority: 1000
  # Target clusters are selected by label.
  clusterSelector:
    matchLabels:
      app.kubernetes.io/managed-by: Helm
  services:
    # ServiceTemplate name to deploy; release name and namespace for the chart.
    - template: ingress-nginx-4-11-3
      name: ingress-nginx
      namespace: ingress-nginx
95 changes: 9 additions & 86 deletions internal/controller/managedcluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
Expand All @@ -51,7 +50,6 @@ import (
hmc "github.com/Mirantis/hmc/api/v1alpha1"
"github.com/Mirantis/hmc/internal/helm"
"github.com/Mirantis/hmc/internal/telemetry"
"github.com/Mirantis/hmc/internal/utils"
)

const (
Expand Down Expand Up @@ -375,71 +373,26 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, l logr.Logger, ma
// TODO(https://github.com/Mirantis/hmc/issues/361): Set status to ManagedCluster object at appropriate places.
func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.ManagedCluster) (ctrl.Result, error) {
l := log.FromContext(ctx).WithValues("ManagedClusterController", fmt.Sprintf("%s/%s", mc.Namespace, mc.Name))
opts := []sveltos.HelmChartOpts{}

// NOTE: The Profile object will be updated with no helm
// charts if len(mc.Spec.Services) == 0. This will result in the
// helm charts being uninstalled on matching clusters if
// Profile originally had len(m.Spec.Sevices) > 0.
for _, svc := range mc.Spec.Services {
if svc.Disable {
l.Info(fmt.Sprintf("Skip adding Template (%s) to Profile (%s) because Disable=true", svc.Template, mc.Name))
continue
}

tmpl := &hmc.ServiceTemplate{}
tmplRef := types.NamespacedName{Name: svc.Template, Namespace: mc.Namespace}
if err := r.Get(ctx, tmplRef, tmpl); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to get Template (%s): %w", tmplRef.String(), err)
}

source, err := r.getServiceTemplateSource(ctx, tmpl)
if err != nil {
return ctrl.Result{}, fmt.Errorf("could not get repository url: %w", err)
}

opts = append(opts, sveltos.HelmChartOpts{
Values: svc.Values,
RepositoryURL: source.Spec.URL,
// We don't have repository name so chart name becomes repository name.
RepositoryName: tmpl.Spec.Helm.ChartName,
ChartName: func() string {
if source.Spec.Type == utils.RegistryTypeOCI {
return tmpl.Spec.Helm.ChartName
}
// Sveltos accepts ChartName in <repository>/<chart> format for non-OCI.
// We don't have a repository name, so we can use <chart>/<chart> instead.
// See: https://projectsveltos.github.io/sveltos/addons/helm_charts/.
return fmt.Sprintf("%s/%s", tmpl.Spec.Helm.ChartName, tmpl.Spec.Helm.ChartName)
}(),
ChartVersion: tmpl.Spec.Helm.ChartVersion,
ReleaseName: svc.Name,
ReleaseNamespace: func() string {
if svc.Namespace != "" {
return svc.Namespace
}
return svc.Name
}(),
// The reason it is passed to PlainHTTP instead of InsecureSkipTLSVerify is because
// the source.Spec.Insecure field is meant to be used for connecting to repositories
// over plain HTTP, which is different than what InsecureSkipTLSVerify is meant for.
// See: https://github.com/fluxcd/source-controller/pull/1288
PlainHTTP: source.Spec.Insecure,
})
opts, err := HelmChartOpts(ctx, r.Client, l, mc.Namespace, mc.Spec.Services)
if err != nil {
return ctrl.Result{}, err
}

if _, err := sveltos.ReconcileProfile(ctx, r.Client, l, mc.Namespace, mc.Name,
map[string]string{
hmc.FluxHelmChartNamespaceKey: mc.Namespace,
hmc.FluxHelmChartNameKey: mc.Name,
},
sveltos.ReconcileProfileOpts{
OwnerReference: &metav1.OwnerReference{
APIVersion: hmc.GroupVersion.String(),
Kind: hmc.ManagedClusterKind,
Name: mc.Name,
UID: mc.UID,
},
LabelSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
hmc.FluxHelmChartNamespaceKey: mc.Namespace,
hmc.FluxHelmChartNameKey: mc.Name,
},
},
HelmChartOpts: opts,
Priority: mc.Spec.Priority,
StopOnConflict: mc.Spec.StopOnConflict,
Expand All @@ -455,36 +408,6 @@ func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.M
return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil
}

// getServiceTemplateSource returns the source (HelmRepository) used by the ServiceTemplate.
// It is fetched by querying for ServiceTemplate -> HelmChart -> HelmRepository.
func (r *ManagedClusterReconciler) getServiceTemplateSource(ctx context.Context, tmpl *hmc.ServiceTemplate) (*sourcev1.HelmRepository, error) {
tmplRef := types.NamespacedName{Namespace: tmpl.Namespace, Name: tmpl.Name}

if tmpl.Status.ChartRef == nil {
return nil, fmt.Errorf("status for ServiceTemplate (%s) has not been updated yet", tmplRef.String())
}

hc := &sourcev1.HelmChart{}
if err := r.Get(ctx, types.NamespacedName{
Namespace: tmpl.Status.ChartRef.Namespace,
Name: tmpl.Status.ChartRef.Name,
}, hc); err != nil {
return nil, fmt.Errorf("failed to get HelmChart (%s): %w", tmplRef.String(), err)
}

repo := &sourcev1.HelmRepository{}
if err := r.Get(ctx, types.NamespacedName{
// Using chart's namespace because it's source
// (helm repository in this case) should be within the same namespace.
Namespace: hc.Namespace,
Name: hc.Spec.SourceRef.Name,
}, repo); err != nil {
return nil, fmt.Errorf("failed to get HelmRepository (%s): %w", tmplRef.String(), err)
}

return repo, nil
}

func validateReleaseWithValues(ctx context.Context, actionConfig *action.Configuration, managedCluster *hmc.ManagedCluster, hcChart *chart.Chart) error {
install := action.NewInstall(actionConfig)
install.DryRun = true
Expand Down
136 changes: 133 additions & 3 deletions internal/controller/multiclusterservice_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,11 +16,19 @@ package controller

import (
"context"
"fmt"

apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

hmc "github.com/Mirantis/hmc/api/v1alpha1"
"github.com/Mirantis/hmc/internal/sveltos"
"github.com/Mirantis/hmc/internal/utils"
"github.com/go-logr/logr"
)

// MultiClusterServiceReconciler reconciles a MultiClusterService object
Expand All @@ -29,10 +37,132 @@ type MultiClusterServiceReconciler struct {
}

// Reconcile reconciles a MultiClusterService object.
func (*MultiClusterServiceReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
_ = ctrl.LoggerFrom(ctx)
func (r *MultiClusterServiceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	l := ctrl.LoggerFrom(ctx).WithValues("MultiClusterServiceController", req.NamespacedName.String())
	l.Info("Reconciling MultiClusterService")

	// Fetch the object; NotFound means it was already deleted, nothing to do.
	mcsvc := &hmc.MultiClusterService{}
	err := r.Get(ctx, req.NamespacedName, mcsvc)
	if apierrors.IsNotFound(err) {
		l.Info("MultiClusterService not found, ignoring since object must be deleted")
		return ctrl.Result{}, nil
	}
	if err != nil {
		l.Error(err, "Failed to get MultiClusterService")
		return ctrl.Result{}, err
	}

	// A non-zero deletion timestamp means deletion is in progress; run cleanup.
	if !mcsvc.DeletionTimestamp.IsZero() {
		l.Info("Deleting MultiClusterService")
		return r.reconcileDelete(ctx, mcsvc)
	}

	// Ensure our finalizer is present before doing any work. The Update
	// triggers a new reconcile event, so return early after adding it.
	if ok := controllerutil.AddFinalizer(mcsvc, hmc.MultiClusterServiceFinalizer); ok {
		if err := r.Client.Update(ctx, mcsvc); err != nil {
			return ctrl.Result{}, fmt.Errorf("failed to update MultiClusterService %s with finalizer %s: %w", mcsvc.Name, hmc.MultiClusterServiceFinalizer, err)
		}
		return ctrl.Result{}, nil
	}

	// By using DefaultSystemNamespace we are enforcing that MultiClusterService
	// may only use ServiceTemplates that are present in the hmc-system namespace.
	opts, err := HelmChartOpts(ctx, r.Client, l, utils.DefaultSystemNamespace, mcsvc.Spec.Services)
	if err != nil {
		return ctrl.Result{}, err
	}

	// Create or update the Sveltos ClusterProfile that deploys the services to
	// all clusters matching Spec.ClusterSelector. The owner reference ties the
	// ClusterProfile's lifetime to this MultiClusterService.
	if _, err := sveltos.ReconcileClusterProfile(ctx, r.Client, l, mcsvc.Name,
		sveltos.ReconcileProfileOpts{
			OwnerReference: &metav1.OwnerReference{
				APIVersion: hmc.GroupVersion.String(),
				Kind:       hmc.MultiClusterServiceKind,
				Name:       mcsvc.Name,
				UID:        mcsvc.UID,
			},
			LabelSelector:  mcsvc.Spec.ClusterSelector,
			HelmChartOpts:  opts,
			Priority:       mcsvc.Spec.Priority,
			StopOnConflict: mcsvc.Spec.StopOnConflict,
		}); err != nil {
		return ctrl.Result{}, fmt.Errorf("failed to reconcile ClusterProfile: %w", err)
	}

	return ctrl.Result{}, nil
}

// HelmChartOpts returns a slice of helm chart options to use with Sveltos.
// namespace is the namespace of the ServiceTemplates referred to by the
// services slice; all referred templates are looked up in that one namespace.
func HelmChartOpts(ctx context.Context, c client.Client, l logr.Logger, namespace string, services []hmc.ServiceSpec) ([]sveltos.HelmChartOpts, error) {
	opts := make([]sveltos.HelmChartOpts, 0, len(services))

	// NOTE: The Profile/ClusterProfile object will be updated with
	// no helm charts if len(services) == 0. This will result
	// in the helm charts being uninstalled on matching clusters if
	// the Profile/ClusterProfile originally had len(services) > 0.
	for _, svc := range services {
		if svc.Disable {
			l.Info(fmt.Sprintf("Skip adding Template %s because Disable=true", svc.Template))
			continue
		}

		tmpl := &hmc.ServiceTemplate{}
		// Here we can use the same namespace for all services
		// because if the services slice is part of:
		// 1. ManagedCluster: Then the referred template must be in its own namespace.
		// 2. MultiClusterService: Then the referred template must be in hmc-system namespace.
		ref := types.NamespacedName{Name: svc.Template, Namespace: namespace}
		if err := c.Get(ctx, ref, tmpl); err != nil {
			return nil, fmt.Errorf("failed to get Template %s: %w", ref.String(), err)
		}

		source, err := TemplateSource(ctx, c, tmpl)
		if err != nil {
			return nil, fmt.Errorf("could not get repository url: %w", err)
		}

		// Sveltos accepts ChartName in <repository>/<chart> format for non-OCI.
		// We don't have a repository name, so we use <chart>/<chart> instead.
		// See: https://projectsveltos.github.io/sveltos/addons/helm_charts/.
		chartName := tmpl.Spec.Helm.ChartName
		if source.Spec.Type != utils.RegistryTypeOCI {
			chartName = fmt.Sprintf("%s/%s", chartName, chartName)
		}

		// Default the release namespace to the service name when unset.
		releaseNamespace := svc.Namespace
		if releaseNamespace == "" {
			releaseNamespace = svc.Name
		}

		opts = append(opts, sveltos.HelmChartOpts{
			Values:        svc.Values,
			RepositoryURL: source.Spec.URL,
			// We don't have repository name so chart name becomes repository name.
			RepositoryName:   tmpl.Spec.Helm.ChartName,
			ChartName:        chartName,
			ChartVersion:     tmpl.Spec.Helm.ChartVersion,
			ReleaseName:      svc.Name,
			ReleaseNamespace: releaseNamespace,
			// The reason it is passed to PlainHTTP instead of InsecureSkipTLSVerify is because
			// the source.Spec.Insecure field is meant to be used for connecting to repositories
			// over plain HTTP, which is different than what InsecureSkipTLSVerify is meant for.
			// See: https://github.com/fluxcd/source-controller/pull/1288
			PlainHTTP: source.Spec.Insecure,
		})
	}

	return opts, nil
}

// reconcileDelete removes the Sveltos ClusterProfile backing the given
// MultiClusterService and then drops our finalizer so deletion can complete.
func (r *MultiClusterServiceReconciler) reconcileDelete(ctx context.Context, mcsvc *hmc.MultiClusterService) (ctrl.Result, error) {
	// Delete the associated ClusterProfile first; on failure keep the
	// finalizer in place so the deletion is retried.
	err := sveltos.DeleteClusterProfile(ctx, r.Client, mcsvc.Name)
	if err != nil {
		return ctrl.Result{}, err
	}

	// Only persist the object if the finalizer was actually present.
	if !controllerutil.RemoveFinalizer(mcsvc, hmc.MultiClusterServiceFinalizer) {
		return ctrl.Result{}, nil
	}

	if err := r.Client.Update(ctx, mcsvc); err != nil {
		return ctrl.Result{}, fmt.Errorf("failed to remove finalizer %s from MultiClusterService %s: %w", hmc.MultiClusterServiceFinalizer, mcsvc.Name, err)
	}

	return ctrl.Result{}, nil
}
Expand Down
Loading

0 comments on commit 0739289

Please sign in to comment.