diff --git a/cmd/main.go b/cmd/main.go index 344a7047c..655915fba 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -203,7 +203,6 @@ func main() { if err = (&controlplane.K0sController{ Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), ClientSet: clientSet, RESTConfig: restConfig, }).SetupWithManager(mgr); err != nil { diff --git a/go.mod b/go.mod index 389fff540..b033f0b6a 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.22.0 require ( github.com/cloudflare/cfssl v1.6.4 github.com/go-logr/logr v1.4.2 + github.com/gobuffalo/flect v1.0.2 github.com/google/uuid v1.6.0 github.com/imdario/mergo v0.3.16 github.com/k0sproject/k0s v1.27.2-0.20230504131248-94378e521a29 @@ -12,11 +13,15 @@ require ( github.com/k0sproject/version v0.6.0 github.com/onsi/ginkgo/v2 v2.20.2 github.com/onsi/gomega v1.34.2 + github.com/pkg/errors v0.9.1 github.com/stretchr/testify v1.9.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.28.4 + k8s.io/apiextensions-apiserver v0.28.4 k8s.io/apimachinery v0.28.4 k8s.io/client-go v0.28.4 + k8s.io/klog/v2 v2.100.1 + k8s.io/kubectl v0.28.4 k8s.io/kubernetes v1.28.4 k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 sigs.k8s.io/controller-runtime v0.16.5 @@ -25,7 +30,7 @@ require ( require ( github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver v1.5.0 + github.com/Masterminds/semver v1.5.0 // indirect github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Masterminds/sprig v2.22.0+incompatible // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect @@ -56,7 +61,7 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 // indirect golang.org/x/crypto v0.27.0 golang.org/x/sync v0.8.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.24.0 gotest.tools/v3 v3.4.0 // indirect helm.sh/helm/v3 v3.11.3 // indirect k8s.io/kube-aggregator v0.27.2 // indirect @@ -90,7 +95,6 @@ require ( github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/gobuffalo/flect v1.0.2 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -124,7 +128,6 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.17.0 // indirect github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect github.com/prometheus/common v0.44.0 // indirect @@ -151,6 +154,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.25.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/mod v0.20.0 // indirect golang.org/x/net v0.28.0 // indirect golang.org/x/oauth2 v0.14.0 // indirect golang.org/x/sys v0.25.0 // indirect @@ -167,14 +171,12 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/apiextensions-apiserver v0.28.4 // indirect k8s.io/apiserver v0.28.4 // indirect k8s.io/cloud-provider v0.27.1 // indirect k8s.io/cluster-bootstrap v0.28.4 // indirect k8s.io/component-base v0.28.4 // indirect k8s.io/component-helpers v0.28.4 // indirect k8s.io/controller-manager v0.28.4 // indirect - k8s.io/klog/v2 
v2.100.1 // indirect k8s.io/kms v0.28.4 // indirect k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect k8s.io/kubelet v0.27.1 // indirect diff --git a/go.sum b/go.sum index 7a660023c..dc359eb64 100644 --- a/go.sum +++ b/go.sum @@ -551,6 +551,8 @@ k8s.io/kube-aggregator v0.28.4 h1:VIGTKc3cDaJ44bvj988MTapJyRPbWXXcCvlp7HVLq5Q= k8s.io/kube-aggregator v0.28.4/go.mod h1:SHehggsYGjVaE1CZTfhukAPpdhs7bflJiddLrabbQNY= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kubectl v0.28.4 h1:gWpUXW/T7aFne+rchYeHkyB8eVDl5UZce8G4X//kjUQ= +k8s.io/kubectl v0.28.4/go.mod h1:CKOccVx3l+3MmDbkXtIUtibq93nN2hkDR99XDCn7c/c= k8s.io/kubelet v0.28.4 h1:Ypxy1jaFlSXFXbg/yVtFOU2ZxErBVRJfLu8+t4s7Dtw= k8s.io/kubelet v0.28.4/go.mod h1:w1wPI12liY/aeC70nqKYcNNkr6/nbyvdMB7P7wmww2o= k8s.io/kubernetes v1.28.4 h1:aRNxs5jb8FVTtlnxeA4FSDBVKuFwA8Gw40/U2zReBYA= diff --git a/internal/controller/controlplane/helper.go b/internal/controller/controlplane/helper.go index deac5f34b..b9e41645a 100644 --- a/internal/controller/controlplane/helper.go +++ b/internal/controller/controlplane/helper.go @@ -43,6 +43,7 @@ import ( const ( etcdMemberConditionTypeJoined = "Joined" + generatedMachineRoleLabel = "cluster.x-k8s.io/generateMachine-role" ) func (c *K0sController) createMachine(ctx context.Context, name string, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane, infraRef corev1.ObjectReference, failureDomain *string) (*clusterv1.Machine, error) { @@ -50,7 +51,7 @@ func (c *K0sController) createMachine(ctx context.Context, name string, cluster if err != nil { return nil, fmt.Errorf("error generating machine: %w", err) } - _ = ctrl.SetControllerReference(kcp, machine, c.Scheme) + _ = ctrl.SetControllerReference(kcp, machine, c.Client.Scheme()) return machine, c.Client.Patch(ctx, machine, client.Apply, &client.PatchOptions{ FieldManager: "k0smotron", @@ -81,9 +82,9 @@ func (c *K0sController) generateMachine(_ context.Context, name string, cluster v := kcp.Spec.Version labels := map[string]string{ - "cluster.x-k8s.io/cluster-name": kcp.Name, - "cluster.x-k8s.io/control-plane": "true", - "cluster.x-k8s.io/generateMachine-role": "control-plane", + clusterv1.ClusterNameLabel: kcp.Name, + clusterv1.MachineControlPlaneLabel: "true", + generatedMachineRoleLabel: "control-plane", } for _, arg := range kcp.Spec.K0sConfigSpec.Args { @@ -210,7 +211,11 @@ func (c *K0sController) generateMachineFromTemplate(ctx context.Context, name st return nil, err } - _ = ctrl.SetControllerReference(kcp, unstructuredMachineTemplate, c.Scheme) + _ = ctrl.SetControllerReference(cluster, unstructuredMachineTemplate, c.Client.Scheme()) + err = c.Client.Patch(ctx, unstructuredMachineTemplate, client.Merge, &client.PatchOptions{FieldManager: "k0smotron"}) + if err != nil { + return nil, err + } template, found, err := unstructured.NestedMap(unstructuredMachineTemplate.UnstructuredContent(), "spec", "template") if !found { diff --git a/internal/controller/controlplane/k0s_controlplane_controller.go b/internal/controller/controlplane/k0s_controlplane_controller.go index b39e13554..d107b369d 100644 --- a/internal/controller/controlplane/k0s_controlplane_controller.go +++ b/internal/controller/controlplane/k0s_controlplane_controller.go @@ -34,7 +34,6 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" @@ -63,15 +62,20 @@ const ( ) var ( - ErrNotReady = fmt.Errorf("waiting for the state") - ErrNewMachinesNotReady = fmt.Errorf("waiting for new machines: %w", ErrNotReady) + ErrNotReady = fmt.Errorf("waiting for the state") + ErrNewMachinesNotReady = fmt.Errorf("waiting for new machines: %w", ErrNotReady) + FRPTokenNameTemplate = "%s-frp-token" + FRPConfigMapNameTemplate = "%s-frps-config" + FRPDeploymentNameTemplate = "%s-frps" + FRPServiceNameTemplate = "%s-frps" ) type K0sController struct { client.Client - Scheme *runtime.Scheme ClientSet *kubernetes.Clientset RESTConfig *rest.Config + // workloadClusterKubeClient is used during testing to inject a fake client + workloadClusterKubeClient *kubernetes.Clientset } // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=k0scontrolplanes/status,verbs=get;list;watch;create;update;patch;delete @@ -124,6 +128,11 @@ func (c *K0sController) Reconcile(ctx context.Context, req ctrl.Request) (res ct return ctrl.Result{}, nil } + if annotations.IsPaused(cluster, kcp) { + log.Info("Reconciliation is paused for this object or owning cluster") + return ctrl.Result{}, nil + } + // Always patch the object to update the status defer func() { log.Info("Updating status") @@ -160,11 +169,6 @@ func (c *K0sController) Reconcile(ctx context.Context, req ctrl.Request) (res ct log = log.WithValues("cluster", cluster.Name) - if annotations.IsPaused(cluster, kcp) { - log.Info("Reconciliation is paused for this object or owning cluster") - return ctrl.Result{}, nil - } - if err := c.ensureCertificates(ctx, cluster, kcp); err != nil { log.Error(err, "Failed to ensure certificates") return ctrl.Result{}, err @@ -630,7 +634,7 @@ token = ` + frpToken + ` ` } - frpsCMName := kcp.GetName() + "-frps-config" + frpsCMName := fmt.Sprintf(FRPConfigMapNameTemplate, kcp.GetName()) cm := corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{ Kind: "ConfigMap", @@ -645,7 +649,7 @@ token = ` + frpToken + ` }, } - _ = ctrl.SetControllerReference(kcp, &cm, c.Scheme) + _ = ctrl.SetControllerReference(kcp, &cm, c.Client.Scheme()) err = c.Client.Patch(ctx, &cm, client.Apply, &client.PatchOptions{FieldManager: "k0s-bootstrap"}) if err != nil { return fmt.Errorf("error creating ConfigMap: %w", err) @@ -657,7 +661,7 @@ token = ` + frpToken + ` Kind: "Deployment", }, ObjectMeta: metav1.ObjectMeta{ - Name: kcp.GetName() + "-frps", + Name: fmt.Sprintf(FRPDeploymentNameTemplate, kcp.GetName()), Namespace: kcp.GetNamespace(), }, Spec: appsv1.DeploymentSpec{ @@ -714,7 +718,7 @@ token = ` + frpToken + ` }}, }, } - _ = ctrl.SetControllerReference(kcp, &frpsDeployment, c.Scheme) + _ = ctrl.SetControllerReference(kcp, &frpsDeployment, c.Client.Scheme()) err = c.Client.Patch(ctx, &frpsDeployment, client.Apply, &client.PatchOptions{FieldManager: "k0s-bootstrap"}) if err != nil { return fmt.Errorf("error creating Deployment: %w", err) @@ -726,7 +730,7 @@ token = ` + frpToken + ` Kind: "Service", }, ObjectMeta: metav1.ObjectMeta{ - Name: kcp.GetName() + "-frps", + Name: fmt.Sprintf(FRPServiceNameTemplate, kcp.GetName()), Namespace: kcp.GetNamespace(), }, Spec: corev1.ServiceSpec{ @@ -750,7 +754,7 @@ token = ` + frpToken + ` Type: corev1.ServiceTypeNodePort, }, } - _ = ctrl.SetControllerReference(kcp, &frpsService, c.Scheme) + _ = ctrl.SetControllerReference(kcp, &frpsService, c.Client.Scheme()) err = 
c.Client.Patch(ctx, &frpsService, client.Apply, &client.PatchOptions{FieldManager: "k0s-bootstrap"}) if err != nil { return fmt.Errorf("error creating Service: %w", err) @@ -769,7 +773,7 @@ func (c *K0sController) detectNodeIP(ctx context.Context, _ *cpv1beta1.K0sContro } func (c *K0sController) createFRPToken(ctx context.Context, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) (string, error) { - secretName := cluster.Name + "-frp-token" + secretName := fmt.Sprintf(FRPTokenNameTemplate, cluster.Name) var existingSecret corev1.Secret err := c.Client.Get(ctx, client.ObjectKey{Name: secretName, Namespace: cluster.Namespace}, &existingSecret) @@ -798,7 +802,7 @@ func (c *K0sController) createFRPToken(ctx context.Context, cluster *clusterv1.C Type: clusterv1.ClusterSecretType, } - _ = ctrl.SetControllerReference(kcp, frpSecret, c.Scheme) + _ = ctrl.SetControllerReference(kcp, frpSecret, c.Client.Scheme()) return frpToken, c.Client.Patch(ctx, frpSecret, client.Apply, &client.PatchOptions{ FieldManager: "k0smotron", diff --git a/internal/controller/controlplane/k0s_controlplane_controller_test.go b/internal/controller/controlplane/k0s_controlplane_controller_test.go index c6ea41bc8..ceca5bf3b 100644 --- a/internal/controller/controlplane/k0s_controlplane_controller_test.go +++ b/internal/controller/controlplane/k0s_controlplane_controller_test.go @@ -17,14 +17,45 @@ limitations under the License. package controlplane import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" "testing" + "time" + . "github.com/onsi/gomega" "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + restfake "k8s.io/client-go/rest/fake" + "k8s.io/client-go/tools/clientcmd/api" + clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + kubeadmConfig "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/external" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/kubeconfig" + "sigs.k8s.io/cluster-api/util/secret" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + autopilot "github.com/k0sproject/k0s/pkg/apis/autopilot/v1beta2" bootstrapv1 "github.com/k0sproject/k0smotron/api/bootstrap/v1beta1" "github.com/k0sproject/k0smotron/api/controlplane/v1beta1" + cpv1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1" ) func TestK0sConfigEnrichment(t *testing.T) { @@ -119,3 +150,1254 @@ func TestK0sConfigEnrichment(t *testing.T) { }) } } + +func TestReconcileReturnErrorWhenOwnerClusterIsMissing(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-return-error-cluster-owner-missing") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, gmt := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + g.Expect(testEnv.Create(ctx, gmt)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, gmt, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } 
+ + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: 20 * time.Second, Requeue: true})) + + g.Expect(testEnv.CleanupAndWait(ctx, cluster)).To(Succeed()) + + g.Eventually(func() error { + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) + return err + }, 10*time.Second).Should(HaveOccurred()) +} + +func TestReconcileNoK0sControlPlane(t *testing.T) { + g := NewWithT(t) + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-no-control-plane") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) +} + +func TestReconcilePausedCluster(t *testing.T) { + g := NewWithT(t) + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-paused-cluster") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + + // Cluster 'paused'. + cluster.Spec.Paused = true + + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) +} + +func TestReconcilePausedK0sControlPlane(t *testing.T) { + g := NewWithT(t) + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-paused-k0scontrolplane") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + // K0sControlPlane with 'paused' annotation. 
+ kcp.Annotations = map[string]string{"cluster.x-k8s.io/paused": "true"} + + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) +} + +func TestReconcileTunneling(t *testing.T) { + g := NewWithT(t) + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-tunneling") + g.Expect(err).ToNot(HaveOccurred()) + + node := createNode() + g.Expect(testEnv.Create(ctx, node)).To(Succeed()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + kcp.Spec.K0sConfigSpec = bootstrapv1.K0sConfigSpec{ + Tunneling: bootstrapv1.TunnelingSpec{ + Enabled: true, + }, + } + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + clientSet, err := kubernetes.NewForConfig(testEnv.Config) + g.Expect(err).ToNot(HaveOccurred()) + + r := &K0sController{ + Client: testEnv, + ClientSet: clientSet, + } + err = r.reconcileTunneling(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + + frpToken, err := clientSet.CoreV1().Secrets(ns.Name).Get(ctx, fmt.Sprintf(FRPTokenNameTemplate, cluster.Name), metav1.GetOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(metav1.IsControlledBy(frpToken, kcp)).To(BeTrue()) + + frpCM, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get(ctx, fmt.Sprintf(FRPConfigMapNameTemplate, kcp.GetName()), metav1.GetOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(metav1.IsControlledBy(frpCM, kcp)).To(BeTrue()) + + frpDeploy, err := clientSet.AppsV1().Deployments(ns.Name).Get(ctx, fmt.Sprintf(FRPDeploymentNameTemplate, kcp.GetName()), metav1.GetOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(metav1.IsControlledBy(frpDeploy, kcp)).To(BeTrue()) + + frpService, err := clientSet.CoreV1().Services(ns.Name).Get(ctx, fmt.Sprintf(FRPServiceNameTemplate, kcp.GetName()), metav1.GetOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(metav1.IsControlledBy(frpService, kcp)).To(BeTrue()) +} + +func TestReconcileKubeconfigEmptyAPIEndpoints(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-kubeconfig-empty-api-endpoints") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + + // Host and Port with zero values. 
+ cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{} + + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + err = r.reconcileKubeconfig(ctx, cluster, kcp) + g.Expect(err).To(HaveOccurred()) + + kubeconfigSecret := &corev1.Secret{} + secretKey := client.ObjectKey{ + Namespace: cluster.Namespace, + Name: secret.Name(cluster.Name, secret.Kubeconfig), + } + g.Expect(testEnv.GetAPIReader().Get(ctx, secretKey, kubeconfigSecret)).To(MatchError(ContainSubstring("not found"))) +} + +func TestReconcileKubeconfigMissingCACertificate(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-kubeconfig-missing-ca-certificates") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + + err = r.reconcileKubeconfig(ctx, cluster, kcp) + g.Expect(err).To(HaveOccurred()) + + kubeconfigSecret := &corev1.Secret{} + secretKey := client.ObjectKey{ + Namespace: cluster.Namespace, + Name: secret.Name(cluster.Name, secret.Kubeconfig), + } + g.Expect(testEnv.GetAPIReader().Get(ctx, secretKey, kubeconfigSecret)).To(MatchError(ContainSubstring("not found"))) +} + +func TestReconcileKubeconfigTunnelingModeNotEnabled(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-kubeconfig-tunneling-mode-not-enabled") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + // Tunneling not enabled. 
+ kcp.Spec.K0sConfigSpec.Tunneling.Enabled = false + + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + kubeconfigSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secret.Name(cluster.Name, secret.Kubeconfig), + Namespace: cluster.Namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + OwnerReferences: []metav1.OwnerReference{}, + }, + Data: map[string][]byte{ + secret.KubeconfigDataName: {}, + }, + } + g.Expect(testEnv.Create(ctx, kubeconfigSecret)).To(Succeed()) + + r := &K0sController{ + Client: testEnv, + } + + err = r.reconcileKubeconfig(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestReconcileKubeconfigTunnelingModeProxy(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-kubeconfig-tunneling-mode-proxy") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + // Tunneling mode = 'proxy' + kcp.Spec.K0sConfigSpec.Tunneling = bootstrapv1.TunnelingSpec{ + Enabled: true, + Mode: "proxy", + ServerAddress: "test.com", + TunnelingNodePort: 9999, + } + + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + kubeconfigSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secret.Name(cluster.Name, secret.Kubeconfig), + Namespace: cluster.Namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + OwnerReferences: []metav1.OwnerReference{}, + }, + Data: map[string][]byte{ + secret.KubeconfigDataName: {}, + }, + } + g.Expect(testEnv.Create(ctx, kubeconfigSecret)).To(Succeed()) + + clusterCerts := secret.NewCertificatesForInitialControlPlane(&kubeadmConfig.ClusterConfiguration{}) + g.Expect(clusterCerts.Generate()).To(Succeed()) + caCert := clusterCerts.GetByPurpose(secret.ClusterCA) + caCertSecret := caCert.AsSecret( + client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, + *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0sControlPlane")), + ) + g.Expect(testEnv.Create(ctx, caCertSecret)).To(Succeed()) + + r := &K0sController{ + Client: testEnv, + } + + err = r.reconcileKubeconfig(ctx, cluster, kcp) + g.Expect(err).To(HaveOccurred()) + + secretKey := client.ObjectKey{ + Namespace: cluster.Namespace, + Name: secret.Name(cluster.Name+"-proxied", secret.Kubeconfig), + } + + kubeconfigProxiedSecret := &corev1.Secret{} + g.Expect(testEnv.Get(ctx, secretKey, kubeconfigProxiedSecret)).To(Succeed()) + + kubeconfigProxiedSecretCrt, _ := runtime.Decode(clientcmdlatest.Codec, kubeconfigProxiedSecret.Data["value"]) + for _, v := range kubeconfigProxiedSecretCrt.(*api.Config).Clusters { + g.Expect(v.Server).To(Equal("https://test.endpoint:6443")) + g.Expect(v.ProxyURL).To(Equal("http://test.com:9999")) + } +} + +func TestReconcileKubeconfigTunnelingModeTunnel(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-kubeconfig-tunneling-mode-tunnel") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + // Tunneling mode = 'tunnel' + kcp.Spec.K0sConfigSpec.Tunneling = bootstrapv1.TunnelingSpec{ + Enabled: true, + Mode: "tunnel", + ServerAddress: 
"test.com", + TunnelingNodePort: 9999, + } + + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + kubeconfigSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secret.Name(cluster.Name, secret.Kubeconfig), + Namespace: cluster.Namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + OwnerReferences: []metav1.OwnerReference{}, + }, + Data: map[string][]byte{ + secret.KubeconfigDataName: {}, + }, + } + g.Expect(testEnv.Create(ctx, kubeconfigSecret)).To(Succeed()) + + clusterCerts := secret.NewCertificatesForInitialControlPlane(&kubeadmConfig.ClusterConfiguration{}) + g.Expect(clusterCerts.Generate()).To(Succeed()) + caCert := clusterCerts.GetByPurpose(secret.ClusterCA) + caCertSecret := caCert.AsSecret( + client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, + *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0sControlPlane")), + ) + g.Expect(testEnv.Create(ctx, caCertSecret)).To(Succeed()) + + r := &K0sController{ + Client: testEnv, + } + + err = r.reconcileKubeconfig(ctx, cluster, kcp) + g.Expect(err).To(HaveOccurred()) + + secretKey := client.ObjectKey{ + Namespace: cluster.Namespace, + Name: secret.Name(cluster.Name+"-tunneled", secret.Kubeconfig), + } + kubeconfigProxiedSecret := &corev1.Secret{} + g.Expect(testEnv.Get(ctx, secretKey, kubeconfigProxiedSecret)).ToNot(HaveOccurred()) + + kubeconfigProxiedSecretCrt, _ := runtime.Decode(clientcmdlatest.Codec, kubeconfigProxiedSecret.Data["value"]) + for _, v := range kubeconfigProxiedSecretCrt.(*api.Config).Clusters { + g.Expect(v.Server).To(Equal("https://test.com:9999")) + } +} + +func TestReconcileK0sConfigNotProvided(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-config-k0sconfig-not-provided") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + kcp.Spec.K0sConfigSpec.K0s = nil + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + err = r.reconcileConfig(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(kcp.Spec.K0sConfigSpec.K0s).To(BeNil()) +} + +func TestReconcileK0sConfigWithNLLBEnabled(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-config-nllb-enabled") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + // Enable '.spec.network.nodeLocalLoadBalancing' + kcp.Spec.K0sConfigSpec = bootstrapv1.K0sConfigSpec{ + K0s: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "k0s.k0sproject.io/v1beta1", + "kind": "ClusterConfig", + "spec": map[string]interface{}{ + "api": map[string]interface{}{ + "sans": []interface{}{ + "test.com", + }, + }, + "network": map[string]interface{}{ + "nodeLocalLoadBalancing": map[string]interface{}{ + "enabled": true, + }, + }, + }, + }, + }, + } + + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + err = r.reconcileConfig(ctx, cluster, kcp) + 
g.Expect(err).ToNot(HaveOccurred()) + + expectedk0sConfig := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "k0s.k0sproject.io/v1beta1", + "kind": "ClusterConfig", + "spec": map[string]interface{}{ + "api": map[string]interface{}{ + "sans": []interface{}{ + "test.endpoint", + "test.com", + }, + }, + "network": map[string]interface{}{ + "nodeLocalLoadBalancing": map[string]interface{}{ + "enabled": true, + }, + }, + }, + }, + } + g.Expect(kcp.Spec.K0sConfigSpec.K0s).To(Equal(expectedk0sConfig)) +} + +func TestReconcileK0sConfigWithNLLBDisabled(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-config-nllb-disabled") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + // Disable '.spec.network.nodeLocalLoadBalancing' + kcp.Spec.K0sConfigSpec = bootstrapv1.K0sConfigSpec{ + K0s: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "k0s.k0sproject.io/v1beta1", + "kind": "ClusterConfig", + "spec": map[string]interface{}{ + "api": map[string]interface{}{ + "sans": []interface{}{ + "test.com", + }, + }, + }, + }, + }, + } + + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + err = r.reconcileConfig(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + + expectedk0sConfig := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "k0s.k0sproject.io/v1beta1", + "kind": "ClusterConfig", + "spec": map[string]interface{}{ + "api": map[string]interface{}{ + "sans": []interface{}{ + "test.com", + }, + "externalAddress": "test.endpoint", + }, + }, + }, + } + g.Expect(kcp.Spec.K0sConfigSpec.K0s).To(Equal(expectedk0sConfig)) +} + +func TestReconcileK0sConfigTunnelingServerAddressToApiSans(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-config-tunneling-serveraddress-to-api-sans") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + // With '.spec.k0sConfigSpec.Tunneling.ServerAddress' + kcp.Spec.K0sConfigSpec = bootstrapv1.K0sConfigSpec{ + K0s: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "k0s.k0sproject.io/v1beta1", + "kind": "ClusterConfig", + "spec": map[string]interface{}{ + "api": map[string]interface{}{ + "sans": []interface{}{ + "test.com", + }, + }, + }, + }, + }, + Tunneling: bootstrapv1.TunnelingSpec{ + ServerAddress: "my-tunneling-server-address.com", + }, + } + + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + err = r.reconcileConfig(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + + expectedk0sConfig := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "k0s.k0sproject.io/v1beta1", + "kind": "ClusterConfig", + "spec": map[string]interface{}{ + "api": map[string]interface{}{ + "sans": []interface{}{ + "test.com", + "my-tunneling-server-address.com", + }, + "externalAddress": "test.endpoint", + }, + }, + }, + } + g.Expect(kcp.Spec.K0sConfigSpec.K0s).To(Equal(expectedk0sConfig)) +} + +func TestReconcileMachinesScaleUp(t 
*testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-machine-scale-up") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, gmt := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, gmt)).To(Succeed()) + + desiredReplicas := 5 + kcp.Spec.Replicas = int32(desiredReplicas) + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, gmt, cluster, ns) + + kcpOwnerRef := *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0sControlPlane")) + + r := &K0sController{ + Client: testEnv, + } + + firstMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: machineName(kcp.Name, 0), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + generatedMachineRoleLabel: "control-plane", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + firstMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, firstMachineRelatedToControlPlane)).To(Succeed()) + + secondMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: machineName(kcp.Name, 1), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + generatedMachineRoleLabel: "control-plane", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + secondMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, secondMachineRelatedToControlPlane)).To(Succeed()) + + machineNotRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "external-machine", + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + }, + } + g.Expect(testEnv.Create(ctx, machineNotRelatedToControlPlane)).To(Succeed()) + + err = r.reconcileMachines(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + + machines, err := collections.GetFilteredMachinesForCluster(ctx, testEnv, cluster, collections.ControlPlaneMachines(cluster.Name), collections.ActiveMachines) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(machines).To(HaveLen(desiredReplicas)) + for _, m := range machines { + expectedLabels := map[string]string{ + clusterv1.ClusterNameLabel: cluster.GetName(), + clusterv1.MachineControlPlaneLabel: "true", + generatedMachineRoleLabel: "control-plane", + } + g.Expect(m.Labels).Should(Equal(expectedLabels)) + g.Expect(metav1.IsControlledBy(m, kcp)).To(BeTrue()) + g.Expect(*m.Spec.Version).Should(Equal(kcp.Spec.Version)) + } +} + +func TestReconcileMachinesScaleDown(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, 
"test-reconcile-machines-scale-down") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, gmt := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, gmt)).To(Succeed()) + + desiredReplicas := 1 + kcp.Spec.Replicas = int32(desiredReplicas) + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, gmt, cluster, ns) + + kcpOwnerRef := *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0sControlPlane")) + + fakeClient := &restfake.RESTClient{ + Client: restfake.CreateHTTPClient(roundTripperForWorkloadClusterAPI), + } + + restClient, _ := rest.RESTClientFor(&rest.Config{ + ContentConfig: rest.ContentConfig{ + NegotiatedSerializer: scheme.Codecs, + GroupVersion: &metav1.SchemeGroupVersion, + }, + }) + restClient.Client = fakeClient.Client + + r := &K0sController{ + Client: testEnv, + workloadClusterKubeClient: kubernetes.New(restClient), + } + + firstMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: machineName(kcp.Name, 0), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + generatedMachineRoleLabel: "control-plane", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + firstMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, firstMachineRelatedToControlPlane)).To(Succeed()) + + secondMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: machineName(kcp.Name, 1), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + generatedMachineRoleLabel: "control-plane", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + secondMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, secondMachineRelatedToControlPlane)).To(Succeed()) + + thirdMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: machineName(kcp.Name, 2), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + generatedMachineRoleLabel: "control-plane", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + thirdMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, thirdMachineRelatedToControlPlane)).To(Succeed()) + + machineNotRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "external-machine", + Namespace: 
ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + }, + } + g.Expect(testEnv.Create(ctx, machineNotRelatedToControlPlane)).To(Succeed()) + + g.Eventually(func(g Gomega) { + err = r.reconcileMachines(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + + machines, err := collections.GetFilteredMachinesForCluster(ctx, testEnv, cluster, collections.ControlPlaneMachines(cluster.Name), collections.ActiveMachines) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(machines).To(HaveLen(desiredReplicas)) + + k0sBootstrapConfigList := &bootstrapv1.K0sControllerConfigList{} + g.Expect(testEnv.GetAPIReader().List(ctx, k0sBootstrapConfigList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(k0sBootstrapConfigList.Items).To(HaveLen(desiredReplicas)) + + for _, m := range machines { + expectedLabels := map[string]string{ + clusterv1.ClusterNameLabel: cluster.GetName(), + clusterv1.MachineControlPlaneLabel: "true", + generatedMachineRoleLabel: "control-plane", + } + g.Expect(m.Labels).Should(Equal(expectedLabels)) + g.Expect(metav1.IsControlledBy(m, kcp)).To(BeTrue()) + g.Expect(*m.Spec.Version).Should(Equal(kcp.Spec.Version)) + + // Verify that the bootstrap config related to the existing machines is present. + bootstrapObjectKey := client.ObjectKey{ + Namespace: m.Namespace, + Name: m.Name, + } + kc := &bootstrapv1.K0sControllerConfig{} + g.Expect(testEnv.GetAPIReader().Get(ctx, bootstrapObjectKey, kc)).ToNot(HaveOccurred()) + g.Expect(metav1.IsControlledBy(kc, m)).To(BeTrue()) + } + }).Should(Succeed()) +} + +func TestReconcileMachinesSyncOldMachines(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-machines-sync-old-machines") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, gmt := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, gmt)).To(Succeed()) + + desiredReplicas := 3 + kcp.Spec.Replicas = int32(desiredReplicas) + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, gmt, cluster, ns) + + kcpOwnerRef := *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0sControlPlane")) + + fakeClient := &restfake.RESTClient{ + Client: restfake.CreateHTTPClient(roundTripperForWorkloadClusterAPI), + } + + restClient, _ := rest.RESTClientFor(&rest.Config{ + ContentConfig: rest.ContentConfig{ + NegotiatedSerializer: scheme.Codecs, + GroupVersion: &metav1.SchemeGroupVersion, + }, + }) + restClient.Client = fakeClient.Client + + clientSet, err := kubernetes.NewForConfig(testEnv.Config) + g.Expect(err).ToNot(HaveOccurred()) + + r := &K0sController{ + Client: testEnv, + workloadClusterKubeClient: kubernetes.New(restClient), + ClientSet: clientSet, + } + + firstMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: machineName(kcp.Name, 0), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + generatedMachineRoleLabel: "control-plane", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.29.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: 
"infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + firstMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, firstMachineRelatedToControlPlane)).To(Succeed()) + + secondMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: machineName(kcp.Name, 1), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + generatedMachineRoleLabel: "control-plane", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + secondMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, secondMachineRelatedToControlPlane)).To(Succeed()) + + thirdMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: machineName(kcp.Name, 2), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + generatedMachineRoleLabel: "control-plane", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.29.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + thirdMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, thirdMachineRelatedToControlPlane)).To(Succeed()) + + g.Eventually(func(g Gomega) { + err = r.reconcileMachines(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + + machines, err := collections.GetFilteredMachinesForCluster(ctx, testEnv, cluster, collections.ControlPlaneMachines(cluster.Name), collections.ActiveMachines) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(machines).To(HaveLen(desiredReplicas)) + + k0sBootstrapConfigList := &bootstrapv1.K0sControllerConfigList{} + g.Expect(testEnv.GetAPIReader().List(ctx, k0sBootstrapConfigList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(k0sBootstrapConfigList.Items).To(HaveLen(desiredReplicas)) + + for _, m := range machines { + expectedLabels := map[string]string{ + clusterv1.ClusterNameLabel: cluster.GetName(), + clusterv1.MachineControlPlaneLabel: "true", + generatedMachineRoleLabel: "control-plane", + } + g.Expect(m.Labels).Should(Equal(expectedLabels)) + g.Expect(metav1.IsControlledBy(m, kcp)).To(BeTrue()) + g.Expect(*m.Spec.Version).Should(Equal(kcp.Spec.Version)) + + // verify that the bootrap config related to the existing machines is present. 
+ bootstrapObjectKey := client.ObjectKey{ + Namespace: m.Namespace, + Name: m.Name, + } + kc := &bootstrapv1.K0sControllerConfig{} + g.Expect(testEnv.GetAPIReader().Get(ctx, bootstrapObjectKey, kc)).ToNot(HaveOccurred()) + g.Expect(metav1.IsControlledBy(kc, m)).To(BeTrue()) + } + }, 5*time.Second).Should(Succeed()) +} + +func TestReconcileInitializeControlPlanes(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-initialize-controlplanes") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, gmt := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + kcp.Spec.Replicas = 1 + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + g.Expect(testEnv.Create(ctx, gmt)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, gmt, cluster, ns) + + expectedLabels := map[string]string{clusterv1.ClusterNameLabel: cluster.Name} + + fakeClient := &restfake.RESTClient{ + Client: restfake.CreateHTTPClient(roundTripperForWorkloadClusterAPI), + } + + restClient, _ := rest.RESTClientFor(&rest.Config{ + ContentConfig: rest.ContentConfig{ + NegotiatedSerializer: scheme.Codecs, + GroupVersion: &metav1.SchemeGroupVersion, + }, + }) + restClient.Client = fakeClient.Client + + r := &K0sController{ + Client: testEnv, + workloadClusterKubeClient: kubernetes.New(restClient), + } + + _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(testEnv.GetAPIReader().Get(ctx, client.ObjectKey{Name: kcp.Name, Namespace: kcp.Namespace}, kcp)).To(Succeed()) + g.Expect(kcp.Status.Selector).NotTo(BeEmpty()) + g.Expect(kcp.Status.Version).To(Equal(fmt.Sprintf("%s+%s", kcp.Spec.Version, defaultK0sSuffix))) + g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(1)) + g.Expect(testEnv.GetAPIReader().Get(ctx, util.ObjectKey(gmt), gmt)).To(Succeed()) + g.Expect(gmt.GetOwnerReferences()).To(ContainElement(metav1.OwnerReference{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: cluster.Name, + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + UID: cluster.UID, + })) + g.Expect(conditions.IsFalse(kcp, cpv1beta1.ControlPlaneReadyCondition)).To(BeTrue()) + + // Expected secrets are created + caSecret, err := secret.GetFromNamespacedName(ctx, testEnv, client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, secret.ClusterCA) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(caSecret).NotTo(BeNil()) + g.Expect(caSecret.Data).NotTo(BeEmpty()) + g.Expect(caSecret.Labels).To(Equal(expectedLabels)) + + etcdSecret, err := secret.GetFromNamespacedName(ctx, testEnv, client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, secret.EtcdCA) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(etcdSecret).NotTo(BeNil()) + g.Expect(etcdSecret.Data).NotTo(BeEmpty()) + g.Expect(etcdSecret.Labels).To(Equal(expectedLabels)) + + kubeconfigSecret, err := secret.GetFromNamespacedName(ctx, testEnv, client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, secret.Kubeconfig) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(kubeconfigSecret).NotTo(BeNil()) + g.Expect(kubeconfigSecret.Data).NotTo(BeEmpty()) + g.Expect(kubeconfigSecret.Labels).To(Equal(expectedLabels)) + k, err := kubeconfig.FromSecret(ctx, testEnv, util.ObjectKey(cluster)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(k).NotTo(BeEmpty()) + + proxySecret, err := secret.GetFromNamespacedName(ctx, testEnv, 
client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, secret.FrontProxyCA) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(proxySecret).NotTo(BeNil()) + g.Expect(proxySecret.Data).NotTo(BeEmpty()) + g.Expect(proxySecret.Labels).To(Equal(expectedLabels)) + + saSecret, err := secret.GetFromNamespacedName(ctx, testEnv, client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, secret.ServiceAccount) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(saSecret).NotTo(BeNil()) + g.Expect(saSecret.Data).NotTo(BeEmpty()) + g.Expect(saSecret.Labels).To(Equal(expectedLabels)) + + machineList := &clusterv1.MachineList{} + g.Expect(testEnv.GetAPIReader().List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(machineList.Items).To(HaveLen(1)) + machine := machineList.Items[0] + g.Expect(machine.Name).To(HavePrefix(kcp.Name)) + g.Expect(*machine.Spec.Version).To(Equal(fmt.Sprintf("%s+%s", kcp.Spec.Version, defaultK0sSuffix))) + // Newly cloned infra objects should have the infraref annotation. + infraObj, err := external.Get(ctx, r.Client, &machine.Spec.InfrastructureRef, machine.Spec.InfrastructureRef.Namespace) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, gmt.GetName())) + g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, gmt.GroupVersionKind().GroupKind().String())) + + k0sBootstrapConfigList := &bootstrapv1.K0sControllerConfigList{} + g.Expect(testEnv.GetAPIReader().List(ctx, k0sBootstrapConfigList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(k0sBootstrapConfigList.Items).To(HaveLen(1)) + + g.Expect(metav1.IsControlledBy(&k0sBootstrapConfigList.Items[0], &machine)).To(BeTrue()) + +} + +func roundTripperForWorkloadClusterAPI(req *http.Request) (*http.Response, error) { + header := http.Header{} + header.Set("Content-Type", runtime.ContentTypeJSON) + + switch req.Method { + case "GET": + if strings.HasPrefix(req.URL.Path, "/apis/autopilot.k0sproject.io/v1beta2/controlnodes/") { + res, err := json.Marshal(autopilot.ControlNode{}) + if err != nil { + return nil, err + } + return &http.Response{StatusCode: http.StatusOK, Header: header, Body: io.NopCloser(bytes.NewReader(res))}, nil + + } + case "DELETE": + if strings.HasPrefix(req.URL.Path, "/apis/autopilot.k0sproject.io/v1beta2/controlnodes/") { + return &http.Response{StatusCode: http.StatusOK, Header: header, Body: nil}, nil + } + case "PATCH": + switch { + case strings.HasPrefix(req.URL.Path, "/apis/etcd.k0sproject.io/v1beta1/etcdmembers/"): + { + return &http.Response{StatusCode: http.StatusOK, Header: header, Body: nil}, nil + } + case strings.HasPrefix(req.URL.Path, "/apis/autopilot.k0sproject.io/v1beta2/controlnodes/"): + { + return &http.Response{StatusCode: http.StatusOK, Header: header, Body: nil}, nil + } + case strings.HasPrefix(req.URL.Path, "/apis/infrastructure.cluster.x-k8s.io/v1beta1/namespaces/"): + { + return &http.Response{StatusCode: http.StatusOK, Header: header, Body: nil}, nil + } + } + } + + return &http.Response{StatusCode: http.StatusNotFound, Header: header, Body: nil}, nil +} + +func newCluster(namespacedName *types.NamespacedName) *clusterv1.Cluster { + return &clusterv1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespacedName.Namespace, + Name: namespacedName.Name, + }, + } +} + +func 
createNode() *v1.Node { + return &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"node-role.kubernetes.io/control-plane": ""}, + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeExternalIP, + Address: "1.1.1.1", + }, + }, + }, + } +} + +func createClusterWithControlPlane(namespace string) (*clusterv1.Cluster, *cpv1beta1.K0sControlPlane, *unstructured.Unstructured) { + kcpName := fmt.Sprintf("kcp-foo-%s", util.RandomString(6)) + + cluster := newCluster(&types.NamespacedName{Name: kcpName, Namespace: namespace}) + cluster.Spec = clusterv1.ClusterSpec{ + ControlPlaneRef: &v1.ObjectReference{ + Kind: "K0sControlPlane", + Namespace: namespace, + Name: kcpName, + APIVersion: cpv1beta1.GroupVersion.String(), + }, + ControlPlaneEndpoint: clusterv1.APIEndpoint{ + Host: "test.endpoint", + Port: 6443, + }, + } + + kcp := &cpv1beta1.K0sControlPlane{ + TypeMeta: metav1.TypeMeta{ + APIVersion: cpv1beta1.GroupVersion.String(), + Kind: "K0sControlPlane", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: namespace, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + Name: kcpName, + UID: "1", + }, + }, + }, + Spec: v1beta1.K0sControlPlaneSpec{ + MachineTemplate: &v1beta1.K0sControlPlaneMachineTemplate{ + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: namespace, + Name: "infra-foo", + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + UpdateStrategy: cpv1beta1.UpdateRecreate, + Replicas: int32(1), + Version: "v1.30.0", + }, + } + + genericMachineTemplate := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "GenericInfrastructureMachineTemplate", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "infra-foo", + "namespace": namespace, + "annotations": map[string]interface{}{ + clusterv1.TemplateClonedFromNameAnnotation: kcp.Spec.MachineTemplate.InfrastructureRef.Name, + clusterv1.TemplateClonedFromGroupKindAnnotation: kcp.Spec.MachineTemplate.InfrastructureRef.GroupVersionKind().GroupKind().String(), + }, + }, + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "hello": "world", + }, + }, + }, + }, + } + return cluster, kcp, genericMachineTemplate +} diff --git a/internal/controller/controlplane/suite_test.go b/internal/controller/controlplane/suite_test.go new file mode 100644 index 000000000..cf2af4a22 --- /dev/null +++ b/internal/controller/controlplane/suite_test.go @@ -0,0 +1,37 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controlplane + +import ( + "os" + "testing" + + "github.com/k0sproject/k0smotron/internal/test/envtest" + ctrl "sigs.k8s.io/controller-runtime" +) + +var ( + testEnv *envtest.Environment + ctx = ctrl.SetupSignalHandler() +) + +func TestMain(m *testing.M) { + testEnv = envtest.Build(ctx) + code := m.Run() + testEnv.Teardown() + os.Exit(code) +} diff --git a/internal/controller/controlplane/util.go b/internal/controller/controlplane/util.go index 92fe0a083..8b3ecc407 100644 --- a/internal/controller/controlplane/util.go +++ b/internal/controller/controlplane/util.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/imdario/mergo" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -93,6 +94,10 @@ func (c *K0sController) createKubeconfigSecret(ctx context.Context, cfg *api.Con } func (c *K0sController) getKubeClient(ctx context.Context, cluster *clusterv1.Cluster) (*kubernetes.Clientset, error) { + if c.workloadClusterKubeClient != nil { + return c.workloadClusterKubeClient, nil + } + return k0smoutil.GetKubeClient(ctx, c.Client, cluster) } diff --git a/internal/controller/k0smotron.io/k0smotroncluster_etcd.go b/internal/controller/k0smotron.io/k0smotroncluster_etcd.go index e8e587a42..4f092cd99 100644 --- a/internal/controller/k0smotron.io/k0smotroncluster_etcd.go +++ b/internal/controller/k0smotron.io/k0smotroncluster_etcd.go @@ -20,10 +20,11 @@ import ( "bytes" "context" "fmt" - batchv1 "k8s.io/api/batch/v1" "strings" "text/template" + batchv1 "k8s.io/api/batch/v1" + km "github.com/k0sproject/k0smotron/api/k0smotron.io/v1beta1" apps "k8s.io/api/apps/v1" diff --git a/internal/test/envtest/environment.go b/internal/test/envtest/environment.go new file mode 100644 index 000000000..31fc14ad5 --- /dev/null +++ b/internal/test/envtest/environment.go @@ -0,0 +1,274 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package envtest
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	goruntime "runtime"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/pkg/errors"
+	"golang.org/x/tools/go/packages"
+
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/cluster-api/cmd/clusterctl/log"
+	"sigs.k8s.io/cluster-api/util/kubeconfig"
+
+	bootstrapv1beta1 "github.com/k0sproject/k0smotron/api/bootstrap/v1beta1"
+	controlplanev1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1"
+	infrastructurev1beta1 "github.com/k0sproject/k0smotron/api/infrastructure/v1beta1"
+	k0smotronv1beta1 "github.com/k0sproject/k0smotron/api/k0smotron.io/v1beta1"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
+)
+
+var (
+	cacheSyncBackoff = wait.Backoff{
+		Duration: 100 * time.Millisecond,
+		Factor:   1.5,
+		Steps:    8,
+		Jitter:   0.4,
+	}
+)
+
+// Environment encapsulates a Kubernetes local test environment.
+type Environment struct {
+	manager.Manager
+	client.Client
+	Config *rest.Config
+	env    *envtest.Environment
+	cancel context.CancelFunc
+}
+
+func init() {
+	logger := klog.Background()
+	// Use klog as the internal logger for this envtest environment.
+	log.SetLogger(logger)
+	// Additionally force all controllers to use the same klog logger.
+	ctrl.SetLogger(logger)
+	// Route klog output through Ginkgo's writer so controller logs show up in the test output.
+	klog.SetOutput(ginkgo.GinkgoWriter)
+
+	utilruntime.Must(apiextensionsv1.AddToScheme(scheme.Scheme))
+	utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme))
+	utilruntime.Must(k0smotronv1beta1.AddToScheme(scheme.Scheme))
+	utilruntime.Must(bootstrapv1beta1.AddToScheme(scheme.Scheme))
+	utilruntime.Must(controlplanev1beta1.AddToScheme(scheme.Scheme))
+	utilruntime.Must(infrastructurev1beta1.AddToScheme(scheme.Scheme))
+}
+
+func newEnvironment() *Environment {
+	_, filename, _, _ := goruntime.Caller(0)
+	root := path.Join(path.Dir(filename), "..", "..", "..")
+
+	capiCoreCrdsPath := getFilePathToCAPICoreCRDs()
+	if capiCoreCrdsPath == "" {
+		panic(fmt.Errorf("failed to retrieve cluster-api core crds path"))
+	}
+
+	crdPaths := []string{
+		capiCoreCrdsPath,
+		filepath.Join(root, "config", "clusterapi", "bootstrap", "bases"),
+		filepath.Join(root, "config", "clusterapi", "controlplane", "bases"),
+		filepath.Join(root, "config", "clusterapi", "infrastructure", "bases"),
+		filepath.Join(root, "config", "clusterapi", "k0smotron.io", "bases"),
+	}
+
+	env := &envtest.Environment{
+		ErrorIfCRDPathMissing: true,
+		CRDDirectoryPaths:     crdPaths,
+		CRDs: []*apiextensionsv1.CustomResourceDefinition{
+			genericInfrastructureMachineCRD,
+			genericInfrastructureMachineTemplateCRD,
+		},
+	}
+
+	if _, err := env.Start(); err != nil {
+		panic(err)
+	}
+
+	options := manager.Options{
+		Scheme: scheme.Scheme,
+		Metrics: metricsserver.Options{
+			BindAddress: "0",
+		},
+		Client: client.Options{
+			Cache: &client.CacheOptions{
+				DisableFor: []client.Object{
+					&v1.ConfigMap{},
+					&v1.Secret{},
+				},
+				// Use the cache for all Unstructured get/list calls.
+				Unstructured: true,
+			},
+		},
+	}
+
+	mgr, err := ctrl.NewManager(env.Config, options)
+	if err != nil {
+		panic(fmt.Errorf("failed to create testenv manager: %w", err))
+	}
+
+	if kubeconfigPath := os.Getenv("CAPI_TEST_ENV_KUBECONFIG"); kubeconfigPath != "" {
+		klog.Infof("Writing test env kubeconfig to %q", kubeconfigPath)
+		config := kubeconfig.FromEnvTestConfig(env.Config, &clusterv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{Name: "test"},
+		})
+		if err := os.WriteFile(kubeconfigPath, config, 0o600); err != nil {
+			panic(errors.Wrapf(err, "failed to write the test env kubeconfig"))
+		}
+	}
+
+	return &Environment{
+		Manager: mgr,
+		Client:  mgr.GetClient(),
+		Config:  mgr.GetConfig(),
+		env:     env,
+	}
+}
+
+// Build creates a new test environment and starts its manager in a goroutine.
+func Build(ctx context.Context) *Environment {
+	testEnv := newEnvironment()
+	go func() {
+		fmt.Println("Starting the manager")
+		if err := testEnv.StartManager(ctx); err != nil {
+			panic(fmt.Sprintf("Failed to start the envtest manager: %v", err))
+		}
+	}()
+
+	return testEnv
+}
+
+// Teardown cancels the manager context and stops the test environment.
+func (e *Environment) Teardown() {
+	e.cancel()
+	if err := e.Stop(); err != nil {
+		panic(fmt.Sprintf("Failed to stop envtest: %v", err))
+	}
+}
+
+// StartManager starts the test controller against the local API server.
+func (e *Environment) StartManager(ctx context.Context) error {
+	ctx, cancel := context.WithCancel(ctx)
+	e.cancel = cancel
+	return e.Manager.Start(ctx)
+}
+
+// Stop stops the test environment.
+func (e *Environment) Stop() error {
+	e.cancel()
+	return e.env.Stop()
+}
+
+// getFilePathToCAPICoreCRDs locates the cluster-api module on disk and returns the path of its core CRD manifests.
+func getFilePathToCAPICoreCRDs() string {
+	packageName := "sigs.k8s.io/cluster-api"
+	packageConfig := &packages.Config{
+		Mode: packages.NeedModule,
+	}
+
+	pkgs, err := packages.Load(packageConfig, packageName)
+	if err != nil || len(pkgs) == 0 || pkgs[0].Module == nil {
+		return ""
+	}
+
+	return filepath.Join(pkgs[0].Module.Dir, "config", "crd", "bases")
+}
+
+// CreateNamespace creates a new namespace with a generated name.
+func (e *Environment) CreateNamespace(ctx context.Context, generateName string) (*v1.Namespace, error) {
+	ns := &v1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: fmt.Sprintf("%s-", generateName),
+			Labels: map[string]string{
+				"testenv/original-name": generateName,
+			},
+		},
+	}
+	if err := e.Client.Create(ctx, ns); err != nil {
+		return nil, err
+	}
+
+	return ns, nil
+}
+
+// Cleanup removes objects from the Environment.
+func (e *Environment) Cleanup(ctx context.Context, objs ...client.Object) error {
+	errs := make([]error, 0, len(objs))
+	for _, o := range objs {
+		err := e.Client.Delete(ctx, o)
+		if apierrors.IsNotFound(err) {
+			// If the object is not found it is already gone, for example
+			// when a namespace is deleted before the objects within it.
+			continue
+		}
+		errs = append(errs, err)
+	}
+
+	return kerrors.NewAggregate(errs)
+}
+
+// CleanupAndWait deletes all the given objects and waits for the cache to be updated accordingly.
+//
+// NOTE: Waiting for the cache to be updated helps in preventing test flakes due to the cache sync delays.
+func (e *Environment) CleanupAndWait(ctx context.Context, objs ...client.Object) error {
+	if err := e.Cleanup(ctx, objs...); err != nil {
+		return err
+	}
+
+	// Make sure the cache is updated with the deleted objects.
+	errs := []error{}
+	for _, o := range objs {
+		// Ignore namespaces, because in testenv the namespace cleaner is not running.
+		if o.GetObjectKind().GroupVersionKind().GroupKind() == v1.SchemeGroupVersion.WithKind("Namespace").GroupKind() {
+			continue
+		}
+
+		oCopy := o.DeepCopyObject().(client.Object)
+		key := client.ObjectKeyFromObject(o)
+		err := wait.ExponentialBackoff(
+			cacheSyncBackoff,
+			func() (done bool, err error) {
+				if err := e.Get(ctx, key, oCopy); err != nil {
+					if apierrors.IsNotFound(err) {
+						return true, nil
+					}
+					return false, err
+				}
+				return false, nil
+			})
+		errs = append(errs, errors.Wrapf(err, "key %s, %s is not being deleted from the testenv client cache", o.GetObjectKind().GroupVersionKind().String(), key))
+	}
+	return kerrors.NewAggregate(errs)
+}
diff --git a/internal/test/envtest/generic_provider.go b/internal/test/envtest/generic_provider.go
new file mode 100644
index 000000000..6e54ac023
--- /dev/null
+++ b/internal/test/envtest/generic_provider.go
@@ -0,0 +1,73 @@
+package envtest
+
+import (
+	"strings"
+
+	"github.com/gobuffalo/flect"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/utils/ptr"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/util/contract"
+)
+
+var (
+	// infrastructureGroupVersion is the group version used for infrastructure objects.
+	infrastructureGroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1beta1"}
+
+	// genericInfrastructureMachineCRD is a generic infrastructure machine CRD.
+	genericInfrastructureMachineCRD = generateCRD(infrastructureGroupVersion.WithKind("GenericInfrastructureMachine"))
+
+	// genericInfrastructureMachineTemplateCRD is a generic infrastructure machine template CRD.
+	genericInfrastructureMachineTemplateCRD = generateCRD(infrastructureGroupVersion.WithKind("GenericInfrastructureMachineTemplate"))
+)
+
+// generateCRD returns a minimal namespaced CRD for the given GroupVersionKind whose spec and status preserve unknown fields.
+func generateCRD(gvk schema.GroupVersionKind) *apiextensionsv1.CustomResourceDefinition {
+	return &apiextensionsv1.CustomResourceDefinition{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: apiextensionsv1.SchemeGroupVersion.String(),
+			Kind:       "CustomResourceDefinition",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: contract.CalculateCRDName(gvk.Group, gvk.Kind),
+			Labels: map[string]string{
+				// Cluster API contract label, i.e. "cluster.x-k8s.io/v1beta1": "v1beta1".
+				clusterv1.GroupVersion.String(): "v1beta1",
+			},
+		},
+		Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+			Group: gvk.Group,
+			Scope: apiextensionsv1.NamespaceScoped,
+			Names: apiextensionsv1.CustomResourceDefinitionNames{
+				Kind:   gvk.Kind,
+				Plural: flect.Pluralize(strings.ToLower(gvk.Kind)),
+			},
+			Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+				{
+					Name:    gvk.Version,
+					Served:  true,
+					Storage: true,
+					Subresources: &apiextensionsv1.CustomResourceSubresources{
+						Status: &apiextensionsv1.CustomResourceSubresourceStatus{},
+					},
+					Schema: &apiextensionsv1.CustomResourceValidation{
+						OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{
+							Type: "object",
+							Properties: map[string]apiextensionsv1.JSONSchemaProps{
+								"spec": {
+									Type:                   "object",
+									XPreserveUnknownFields: ptr.To(true),
+								},
+								"status": {
+									Type:                   "object",
+									XPreserveUnknownFields: ptr.To(true),
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+}
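
To show how the new test scaffolding composes, here is a minimal sketch of a controller test built on the helpers above. It assumes only identifiers introduced in this change (testEnv and ctx from suite_test.go, createClusterWithControlPlane, and the Environment methods CreateNamespace, Create via the embedded client, and CleanupAndWait) plus testify's require, which the module already depends on; the test name and flow are hypothetical, not part of the change itself.

package controlplane

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestCreateClusterFixtures is a hypothetical example, not part of this change.
func TestCreateClusterFixtures(t *testing.T) {
	// One namespace per test keeps envtest state isolated; CreateNamespace
	// appends a generated suffix to the given prefix.
	ns, err := testEnv.CreateNamespace(ctx, "controlplane-test")
	require.NoError(t, err)

	// Build the fixtures and create them through the manager-backed client.
	cluster, kcp, genericMachineTemplate := createClusterWithControlPlane(ns.Name)
	require.NoError(t, testEnv.Create(ctx, cluster))
	require.NoError(t, testEnv.Create(ctx, genericMachineTemplate))
	require.NoError(t, testEnv.Create(ctx, kcp))

	// CleanupAndWait blocks until the cached client stops returning the
	// deleted objects, which prevents flakes between tests sharing the env.
	t.Cleanup(func() {
		require.NoError(t, testEnv.CleanupAndWait(ctx, kcp, genericMachineTemplate, cluster, ns))
	})
}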