diff --git a/operators/constellation-node-operator/controllers/nodeversion_controller_env_test.go b/operators/constellation-node-operator/controllers/nodeversion_controller_env_test.go index 7fa62ce3a1..1e1353bc67 100644 --- a/operators/constellation-node-operator/controllers/nodeversion_controller_env_test.go +++ b/operators/constellation-node-operator/controllers/nodeversion_controller_env_test.go @@ -27,6 +27,7 @@ import ( nodemaintenancev1beta1 "github.com/edgelesssys/constellation/v2/3rdparty/node-maintenance-operator/api/v1beta1" mainconstants "github.com/edgelesssys/constellation/v2/internal/constants" updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/api/v1alpha1" + "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/internal/constants" ) var _ = Describe("NodeVersion controller", func() { @@ -38,10 +39,15 @@ var _ = Describe("NodeVersion controller", func() { // Define utility constants for object names and testing timeouts/durations and intervals. const ( - nodeVersionResourceName = "nodeversion" - firstNodeName = "node-1" - secondNodeName = "node-2" - scalingGroupID = "scaling-group" + nodeVersionResourceName = "nodeversion" + firstWorkerNodeName = "worker-node-1" + secondWorkerNodeName = "worker-node-2" + thirdWorkerNodeName = "worker-node-3" + firstControlPlaneNodeName = "control-plane-node-1" + secondControlPlaneNodeName = "control-plane-node-2" + thirdControlPlaneNodeName = "control-plane-node-3" + scalingGroupIDWorker = "scaling-group-worker" + scalingGroupIDControlPlane = "scaling-group-control-plane" timeout = time.Second * 20 duration = time.Second * 2 @@ -52,17 +58,17 @@ var _ = Describe("NodeVersion controller", func() { KubernetesComponentsReference: "ref-1", } - firstNodeLookupKey := types.NamespacedName{Name: firstNodeName} - secondNodeLookupKey := types.NamespacedName{Name: secondNodeName} + firstNodeLookupKeyWorker := types.NamespacedName{Name: firstWorkerNodeName} + secondNodeLookupKeyWorker := types.NamespacedName{Name: secondWorkerNodeName} + firstNodeLookupKeyControlPlane := types.NamespacedName{Name: firstControlPlaneNodeName} + secondNodeLookupKeyControlPlane := types.NamespacedName{Name: secondControlPlaneNodeName} nodeVersionLookupKey := types.NamespacedName{Name: nodeVersionResourceName} - scalingGroupLookupKey := types.NamespacedName{Name: scalingGroupID} - joiningPendingNodeLookupKey := types.NamespacedName{Name: secondNodeName} - nodeMaintenanceLookupKey := types.NamespacedName{Name: firstNodeName} + scalingGroupLookupKeyWorker := types.NamespacedName{Name: scalingGroupIDWorker} + scalingGroupLookupKeyControlPlane := types.NamespacedName{Name: scalingGroupIDControlPlane} Context("When updating the cluster-wide node version", func() { - testNodeVersionUpdate := func(newNodeVersionSpec updatev1alpha1.NodeVersionSpec) { + testNodeVersionUpdate := func(newNodeVersionSpec updatev1alpha1.NodeVersionSpec, workersFirst bool) { By("creating a node version resource specifying the first node version") - Expect(fakes.scalingGroupUpdater.SetScalingGroupImage(ctx, scalingGroupID, firstVersionSpec.ImageReference)).Should(Succeed()) nodeVersion := &updatev1alpha1.NodeVersion{ TypeMeta: metav1.TypeMeta{ APIVersion: "update.edgeless.systems/v1alpha1", @@ -75,16 +81,48 @@ var _ = Describe("NodeVersion controller", func() { } Expect(k8sClient.Create(ctx, nodeVersion)).Should(Succeed()) - By("creating a node resource using the first node image") - fakes.nodeReplacer.setNodeImage(firstNodeName, 
firstVersionSpec.ImageReference) - fakes.nodeReplacer.setScalingGroupID(firstNodeName, scalingGroupID) - firstNode := &corev1.Node{ + By("creating a control plane node resource using the first node image") + fakes.nodeReplacer.setNodeImage(firstControlPlaneNodeName, firstVersionSpec.ImageReference) + fakes.nodeReplacer.setScalingGroupID(firstControlPlaneNodeName, scalingGroupIDControlPlane) + firstControlPlaneNode := &corev1.Node{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Node", }, ObjectMeta: metav1.ObjectMeta{ - Name: firstNodeName, + Name: firstControlPlaneNodeName, + Labels: map[string]string{ + "custom-node-label": "custom-node-label-value", + constants.ControlPlaneRoleLabel: "", + }, + Annotations: map[string]string{ + mainconstants.NodeKubernetesComponentsAnnotationKey: firstVersionSpec.KubernetesComponentsReference, + }, + }, + Spec: corev1.NodeSpec{ + ProviderID: firstControlPlaneNodeName, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "192.0.2.1", + }, + }, + }, + } + Expect(k8sClient.Create(ctx, firstControlPlaneNode)).Should(Succeed()) + + By("creating a worker node resource using the first node image") + fakes.nodeReplacer.setNodeImage(firstWorkerNodeName, firstVersionSpec.ImageReference) + fakes.nodeReplacer.setScalingGroupID(firstWorkerNodeName, scalingGroupIDWorker) + firstWorkerNode := &corev1.Node{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Node", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: firstWorkerNodeName, Labels: map[string]string{ "custom-node-label": "custom-node-label-value", }, @@ -93,24 +131,40 @@ var _ = Describe("NodeVersion controller", func() { }, }, Spec: corev1.NodeSpec{ - ProviderID: firstNodeName, + ProviderID: firstWorkerNodeName, }, } - Expect(k8sClient.Create(ctx, firstNode)).Should(Succeed()) + Expect(k8sClient.Create(ctx, firstWorkerNode)).Should(Succeed()) - By("creating a scaling group resource using the first node image") - Expect(fakes.scalingGroupUpdater.SetScalingGroupImage(ctx, scalingGroupID, firstVersionSpec.ImageReference)).Should(Succeed()) - scalingGroup := &updatev1alpha1.ScalingGroup{ + By("creating worker scaling group resource using the first node image") + Expect(fakes.scalingGroupUpdater.SetScalingGroupImage(ctx, scalingGroupIDWorker, firstVersionSpec.ImageReference)).Should(Succeed()) + scalingGroupWorker := &updatev1alpha1.ScalingGroup{ ObjectMeta: metav1.ObjectMeta{ - Name: scalingGroupID, + Name: scalingGroupIDWorker, }, Spec: updatev1alpha1.ScalingGroupSpec{ NodeVersion: nodeVersionResourceName, - GroupID: scalingGroupID, + GroupID: scalingGroupIDWorker, Autoscaling: true, + Role: updatev1alpha1.WorkerRole, }, } - Expect(k8sClient.Create(ctx, scalingGroup)).Should(Succeed()) + Expect(k8sClient.Create(ctx, scalingGroupWorker)).Should(Succeed()) + + By("creating control plane scaling group resource using the first node image") + Expect(fakes.scalingGroupUpdater.SetScalingGroupImage(ctx, scalingGroupIDControlPlane, firstVersionSpec.ImageReference)).Should(Succeed()) + scalingGroupControlPlane := &updatev1alpha1.ScalingGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: scalingGroupIDControlPlane, + }, + Spec: updatev1alpha1.ScalingGroupSpec{ + NodeVersion: nodeVersionResourceName, + GroupID: scalingGroupIDControlPlane, + Autoscaling: true, + Role: updatev1alpha1.ControlPlaneRole, + }, + } + Expect(k8sClient.Create(ctx, scalingGroupControlPlane)).Should(Succeed()) By("creating a cluster-autoscaler deployment") ctx := context.Background() @@ 
-167,11 +221,20 @@ var _ = Describe("NodeVersion controller", func() { return 0 } return len(nodeVersion.Status.UpToDate) - }, timeout, interval).Should(Equal(1)) + }, timeout, interval).Should(Equal(2)) By("updating the node version") - fakes.nodeStateGetter.setNodeState(updatev1alpha1.NodeStateReady) - fakes.nodeReplacer.setCreatedNode(secondNodeName, secondNodeName, nil) + fakes.nodeReplacer.addCreatedNode(scalingGroupIDControlPlane, secondControlPlaneNodeName, secondControlPlaneNodeName, nil) + fakes.nodeReplacer.addCreatedNode(scalingGroupIDWorker, secondWorkerNodeName, secondWorkerNodeName, nil) + fakes.nodeStateGetter.setNodeState(secondControlPlaneNodeName, updatev1alpha1.NodeStateReady) + fakes.nodeStateGetter.setNodeState(secondWorkerNodeName, updatev1alpha1.NodeStateReady) + // When the pending node CR that the nodeversion loop creates is not detected on the second iteration e.g., because of delay in the KubeAPI + // it creates a second node via the cloud provider api. This is fine because the pending node/mint node is not matched if it's not needed + // and the nodeversion loop will clean up the mint node. + fakes.nodeStateGetter.setNodeState(thirdControlPlaneNodeName, updatev1alpha1.NodeStateCreating) + fakes.nodeStateGetter.setNodeState(thirdWorkerNodeName, updatev1alpha1.NodeStateCreating) + fakes.nodeReplacer.addCreatedNode(scalingGroupIDControlPlane, thirdControlPlaneNodeName, thirdControlPlaneNodeName, nil) + fakes.nodeReplacer.addCreatedNode(scalingGroupIDWorker, thirdWorkerNodeName, thirdWorkerNodeName, nil) // Eventually the node version with the new NodeVersion spec. Eventually(func() error { if err := k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion); err != nil { @@ -181,172 +244,216 @@ var _ = Describe("NodeVersion controller", func() { return k8sClient.Update(ctx, nodeVersion) }, timeout, interval).Should(Succeed()) - By("checking that there is an outdated node in the status") + By("checking that there are 2 outdated node in the status") Eventually(func() int { if err := k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion); err != nil { return 0 } return len(nodeVersion.Status.Outdated) - }, timeout, interval).Should(Equal(1), "outdated nodes should be 1") + }, timeout, interval).Should(Equal(2), "outdated nodes should be 2") - By("checking that the scaling group is up to date") + By("checking that the control plane scaling group is up to date") Eventually(func() string { - if err := k8sClient.Get(ctx, scalingGroupLookupKey, scalingGroup); err != nil { + if err := k8sClient.Get(ctx, scalingGroupLookupKeyControlPlane, scalingGroupControlPlane); err != nil { return "" } - return scalingGroup.Status.ImageReference + return scalingGroupControlPlane.Status.ImageReference }, timeout, interval).Should(Equal(newNodeVersionSpec.ImageReference)) - By("checking that a pending node is created") - pendingNode := &updatev1alpha1.PendingNode{} - Eventually(func() error { - return k8sClient.Get(ctx, joiningPendingNodeLookupKey, pendingNode) - }, timeout, interval).Should(Succeed()) - Eventually(func() updatev1alpha1.CSPNodeState { - _ = k8sClient.Get(ctx, joiningPendingNodeLookupKey, pendingNode) - return pendingNode.Status.CSPNodeState - }, timeout, interval).Should(Equal(updatev1alpha1.NodeStateReady)) - Eventually(func() int { - if err := k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion); err != nil { - return 0 - } - return len(nodeVersion.Status.Pending) - }, timeout, interval).Should(Equal(1)) - - By("creating a new node resource using the image from the new node 
version") - fakes.nodeReplacer.setNodeImage(secondNodeName, newNodeVersionSpec.ImageReference) - fakes.nodeReplacer.setScalingGroupID(secondNodeName, scalingGroupID) - secondNode := &corev1.Node{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Node", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: secondNodeName, - }, - Spec: corev1.NodeSpec{ - ProviderID: secondNodeName, - }, - } - Expect(k8sClient.Create(ctx, secondNode)).Should(Succeed()) - - By("marking the new node as AwaitingAnnotation") - Eventually(func() int { - err := k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion) - if err != nil { - return 0 - } - return len(nodeVersion.Status.AwaitingAnnotation) - }, timeout, interval).Should(Equal(1)) - // add a JoiningNode CR for the new node - joiningNode := &updatev1alpha1.JoiningNode{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "update.edgeless.systems/v1alpha1", - Kind: "JoiningNode", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: secondNodeName, - }, - Spec: updatev1alpha1.JoiningNodeSpec{ - Name: secondNodeName, - ComponentsReference: newNodeVersionSpec.KubernetesComponentsReference, - }, - } - Expect(k8sClient.Create(ctx, joiningNode)).Should(Succeed()) - Eventually(func() int { - err := k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion) - if err != nil { - return 1 - } - return len(nodeVersion.Status.AwaitingAnnotation) - }, timeout, interval).Should(Equal(0)) - - By("checking that the new node is properly annotated") - Eventually(func() error { - if err := k8sClient.Get(ctx, secondNodeLookupKey, secondNode); err != nil { - return err - } - // check nodeImageAnnotation annotation - if _, ok := secondNode.Annotations[nodeImageAnnotation]; !ok { - return fmt.Errorf("node %s is missing %s annotation", secondNode.Name, nodeImageAnnotation) - } - // check mainconstants.NodeKubernetesComponentsAnnotationKey annotation - if _, ok := secondNode.Annotations[mainconstants.NodeKubernetesComponentsAnnotationKey]; !ok { - return fmt.Errorf("node %s is missing %s annotation", secondNode.Name, mainconstants.NodeKubernetesComponentsAnnotationKey) - } - return nil - }, timeout, interval).Should(Succeed()) - - By("checking that the nodes are paired as donor and heir") - Eventually(func() map[string]string { - if err := k8sClient.Get(ctx, firstNodeLookupKey, firstNode); err != nil { - return nil - } - return firstNode.Annotations - }, timeout, interval).Should(HaveKeyWithValue(heirAnnotation, secondNodeName)) - Eventually(func() map[string]string { - if err := k8sClient.Get(ctx, secondNodeLookupKey, secondNode); err != nil { - return nil - } - return secondNode.Annotations - }, timeout, interval).Should(HaveKeyWithValue(donorAnnotation, firstNodeName)) - - Eventually(func() error { - if err := k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion); err != nil { - return err - } - if len(nodeVersion.Status.Donors) != 1 { - return fmt.Errorf("node version %s has %d donors, expected 1", nodeVersion.Name, len(nodeVersion.Status.Donors)) - } - if len(nodeVersion.Status.Heirs) != 1 { - return fmt.Errorf("node version %s has %d heirs, expected 1", nodeVersion.Name, len(nodeVersion.Status.Heirs)) + By("checking that the worker scaling group is up to date") + Eventually(func() string { + if err := k8sClient.Get(ctx, scalingGroupLookupKeyWorker, scalingGroupWorker); err != nil { + return "" } - return nil - }, timeout, interval).Should(Succeed()) - Expect(k8sClient.Get(ctx, joiningPendingNodeLookupKey, pendingNode)).Should(Not(Succeed())) + return scalingGroupWorker.Status.ImageReference + }, 
timeout, interval).Should(Equal(newNodeVersionSpec.ImageReference)) - By("checking that node labels are copied to the heir") - Eventually(func() map[string]string { - if err := k8sClient.Get(ctx, firstNodeLookupKey, firstNode); err != nil { + replaceNodeTest := func(firstNodeLookupKey, secondNodeLookupKey types.NamespacedName, firstNodeName, secondNodeName, scalingGroupID string, firstNode *corev1.Node, expectPendingNode bool) *corev1.Node { + By("checking that the pending node is created") + pendingNode := &updatev1alpha1.PendingNode{} + // If we try to upgrade the worker nodes first, we expect here that the worker node is not created + if !expectPendingNode { + Consistently(func() bool { + if err := k8sClient.Get(ctx, secondNodeLookupKey, pendingNode); err != nil { + return false + } + return true + }, duration, interval).Should(BeFalse()) return nil } - return firstNode.Labels - }, timeout, interval).Should(HaveKeyWithValue("custom-node-label", "custom-node-label-value")) - - By("marking the new node as ready") - Eventually(func() error { - if err := k8sClient.Get(ctx, secondNodeLookupKey, secondNode); err != nil { - return err + Eventually(func() bool { + if err := k8sClient.Get(ctx, secondNodeLookupKey, pendingNode); err != nil { + return false + } + return true + }, timeout, interval).Should(BeTrue()) + + Eventually(func() updatev1alpha1.CSPNodeState { + _ = k8sClient.Get(ctx, secondNodeLookupKey, pendingNode) + return pendingNode.Status.CSPNodeState + }, timeout, interval).Should(Equal(updatev1alpha1.NodeStateReady)) + Eventually(func() bool { + if err := k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion); err != nil { + return false + } + return len(nodeVersion.Status.Pending) >= 1 + }, timeout, interval).Should(BeTrue()) + + By("creating a new node resource using the image from the new node version") + fakes.nodeReplacer.setNodeImage(secondNodeName, newNodeVersionSpec.ImageReference) + fakes.nodeReplacer.setScalingGroupID(secondNodeName, scalingGroupID) + var labels map[string]string + if _, ok := firstNode.Labels[constants.ControlPlaneRoleLabel]; ok { + labels = map[string]string{ + constants.ControlPlaneRoleLabel: "", + } } - secondNode.Status.Conditions = []corev1.NodeCondition{ - { - Type: corev1.NodeReady, - Status: corev1.ConditionTrue, + secondNode := &corev1.Node{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Node", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: secondNodeName, + Labels: labels, + }, + Spec: corev1.NodeSpec{ + ProviderID: secondNodeName, }, } - return k8sClient.Status().Update(ctx, secondNode) - }, timeout, interval).Should(Succeed()) - - By("waiting for a NodeMaintenance resource to be created") - nodeMaintenance := &nodemaintenancev1beta1.NodeMaintenance{} - Eventually(func() error { - return k8sClient.Get(ctx, nodeMaintenanceLookupKey, nodeMaintenance) - }, timeout, interval).Should(Succeed()) - - By("marking the NodeMaintenance as successful") - fakes.nodeStateGetter.setNodeState(updatev1alpha1.NodeStateTerminated) - Eventually(func() error { - if err := k8sClient.Get(ctx, nodeMaintenanceLookupKey, nodeMaintenance); err != nil { - return err + Expect(k8sClient.Create(ctx, secondNode)).Should(Succeed()) + + By("marking the new node as AwaitingAnnotation") + Eventually(func() int { + err := k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion) + if err != nil { + return 0 + } + return len(nodeVersion.Status.AwaitingAnnotation) + }, timeout, interval).Should(Equal(1)) + // add a JoiningNode CR for the new node + joiningNode := 
&updatev1alpha1.JoiningNode{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "update.edgeless.systems/v1alpha1", + Kind: "JoiningNode", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: secondNodeName, + }, + Spec: updatev1alpha1.JoiningNodeSpec{ + Name: secondNodeName, + ComponentsReference: newNodeVersionSpec.KubernetesComponentsReference, + }, } - nodeMaintenance.Status.Phase = nodemaintenancev1beta1.MaintenanceSucceeded - return k8sClient.Status().Update(ctx, nodeMaintenance) - }, timeout, interval).Should(Succeed()) + Expect(k8sClient.Create(ctx, joiningNode)).Should(Succeed()) + Eventually(func() int { + err := k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion) + if err != nil { + return 1 + } + return len(nodeVersion.Status.AwaitingAnnotation) + }, timeout, interval).Should(Equal(0)) + + By("checking that the new node is properly annotated") + Eventually(func() error { + if err := k8sClient.Get(ctx, secondNodeLookupKey, secondNode); err != nil { + return err + } + // check nodeImageAnnotation annotation + if _, ok := secondNode.Annotations[nodeImageAnnotation]; !ok { + return fmt.Errorf("node %s is missing %s annotation", secondNode.Name, nodeImageAnnotation) + } + // check mainconstants.NodeKubernetesComponentsAnnotationKey annotation + if _, ok := secondNode.Annotations[mainconstants.NodeKubernetesComponentsAnnotationKey]; !ok { + return fmt.Errorf("node %s is missing %s annotation", secondNode.Name, mainconstants.NodeKubernetesComponentsAnnotationKey) + } + return nil + }, timeout, interval).Should(Succeed()) + + By("checking that the nodes are paired as donor and heir") + Eventually(func() map[string]string { + if err := k8sClient.Get(ctx, firstNodeLookupKey, firstNode); err != nil { + return nil + } + return firstNode.Annotations + }, timeout, interval).Should(HaveKeyWithValue(heirAnnotation, secondNodeName)) + Eventually(func() map[string]string { + if err := k8sClient.Get(ctx, secondNodeLookupKey, secondNode); err != nil { + return nil + } + return secondNode.Annotations + }, timeout, interval).Should(HaveKeyWithValue(donorAnnotation, firstNodeName)) + + Eventually(func() error { + if err := k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion); err != nil { + return err + } + if len(nodeVersion.Status.Donors) != 1 { + return fmt.Errorf("node version %s has %d donors, expected 1", nodeVersion.Name, len(nodeVersion.Status.Donors)) + } + if len(nodeVersion.Status.Heirs) != 1 { + return fmt.Errorf("node version %s has %d heirs, expected 1", nodeVersion.Name, len(nodeVersion.Status.Heirs)) + } + return nil + }, timeout, interval).Should(Succeed()) + Expect(k8sClient.Get(ctx, secondNodeLookupKey, pendingNode)).Should(Not(Succeed())) + + By("checking that node labels are copied to the heir") + Eventually(func() map[string]string { + if err := k8sClient.Get(ctx, firstNodeLookupKey, firstNode); err != nil { + return nil + } + return firstNode.Labels + }, timeout, interval).Should(HaveKeyWithValue("custom-node-label", "custom-node-label-value")) + + By("marking the new node as ready") + Eventually(func() error { + if err := k8sClient.Get(ctx, secondNodeLookupKey, secondNode); err != nil { + return err + } + secondNode.Status.Conditions = []corev1.NodeCondition{ + { + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, + }, + } + return k8sClient.Status().Update(ctx, secondNode) + }, timeout, interval).Should(Succeed()) + + By("waiting for a NodeMaintenance resource to be created") + nodeMaintenance := &nodemaintenancev1beta1.NodeMaintenance{} + Eventually(func() error { + return 
k8sClient.Get(ctx, firstNodeLookupKey, nodeMaintenance) + }, timeout, interval).Should(Succeed()) + + By("marking the NodeMaintenance as successful") + fakes.nodeStateGetter.setNodeState(firstNodeName, updatev1alpha1.NodeStateTerminated) + Eventually(func() error { + if err := k8sClient.Get(ctx, firstNodeLookupKey, nodeMaintenance); err != nil { + return err + } + nodeMaintenance.Status.Phase = nodemaintenancev1beta1.MaintenanceSucceeded + return k8sClient.Status().Update(ctx, nodeMaintenance) + }, timeout, interval).Should(Succeed()) + + By("checking that the outdated node is removed") + Eventually(func() error { + return k8sClient.Get(ctx, firstNodeLookupKey, firstNode) + }, timeout, interval).Should(Not(Succeed())) + + return secondNode + } - By("checking that the outdated node is removed") - Eventually(func() error { - return k8sClient.Get(ctx, firstNodeLookupKey, firstNode) - }, timeout, interval).Should(Not(Succeed())) + var createdControlPlane *corev1.Node + var createdWorkerNode *corev1.Node + if workersFirst { + _ = replaceNodeTest(firstNodeLookupKeyWorker, secondNodeLookupKeyWorker, firstWorkerNodeName, secondWorkerNodeName, scalingGroupIDWorker, firstWorkerNode, false) + createdControlPlane = replaceNodeTest(firstNodeLookupKeyControlPlane, secondNodeLookupKeyControlPlane, firstControlPlaneNodeName, secondControlPlaneNodeName, scalingGroupIDControlPlane, firstControlPlaneNode, true) + createdWorkerNode = replaceNodeTest(firstNodeLookupKeyWorker, secondNodeLookupKeyWorker, firstWorkerNodeName, secondWorkerNodeName, scalingGroupIDWorker, firstWorkerNode, true) + } else { + createdControlPlane = replaceNodeTest(firstNodeLookupKeyControlPlane, secondNodeLookupKeyControlPlane, firstControlPlaneNodeName, secondControlPlaneNodeName, scalingGroupIDControlPlane, firstControlPlaneNode, true) + createdWorkerNode = replaceNodeTest(firstNodeLookupKeyWorker, secondNodeLookupKeyWorker, firstWorkerNodeName, secondWorkerNodeName, scalingGroupIDWorker, firstWorkerNode, true) + } By("checking that all nodes are up-to-date") Eventually(func() int { @@ -355,29 +462,48 @@ var _ = Describe("NodeVersion controller", func() { return 0 } return len(nodeVersion.Status.UpToDate) - }, timeout, interval).Should(Equal(1)) + }, timeout, interval).Should(Equal(2)) By("cleaning up all resources") Expect(k8sClient.Delete(ctx, nodeVersion)).Should(Succeed()) - Expect(k8sClient.Delete(ctx, scalingGroup)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, scalingGroupWorker)).Should(Succeed()) Expect(k8sClient.Delete(ctx, autoscalerDeployment)).Should(Succeed()) Expect(k8sClient.Delete(ctx, strategy)).Should(Succeed()) - Expect(k8sClient.Delete(ctx, secondNode)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, createdControlPlane)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, createdWorkerNode)).Should(Succeed()) } When("Updating the image reference", func() { It("Should update every node in the cluster", func() { - testNodeVersionUpdate(updatev1alpha1.NodeVersionSpec{ - ImageReference: "version-2", - KubernetesComponentsReference: "ref-1", - }) + testNodeVersionUpdate( + updatev1alpha1.NodeVersionSpec{ + ImageReference: "version-2", + KubernetesComponentsReference: "ref-1", + }, + false, + ) }) }) When("Updating the Kubernetes components reference", func() { It("Should update every node in the cluster", func() { - testNodeVersionUpdate(updatev1alpha1.NodeVersionSpec{ - ImageReference: "version-1", - KubernetesComponentsReference: "ref-2", - }) + testNodeVersionUpdate( + updatev1alpha1.NodeVersionSpec{ + 
ImageReference: "version-1", + KubernetesComponentsReference: "ref-2", + }, + false, + ) + }) + }) + When("Updating the Kubernetes components reference and wanting to upgrade the worker nodes first", func() { + It("should fail to update the worker nodes before the control plane nodes", func() { + testNodeVersionUpdate( + updatev1alpha1.NodeVersionSpec{ + ImageReference: "version-1", + KubernetesComponentsReference: "ref-2", + MaxNodeBudget: 2, + }, + true, + ) }) }) }) diff --git a/operators/constellation-node-operator/controllers/nodeversion_controller_test.go b/operators/constellation-node-operator/controllers/nodeversion_controller_test.go index b30bbc4191..fdb14e123f 100644 --- a/operators/constellation-node-operator/controllers/nodeversion_controller_test.go +++ b/operators/constellation-node-operator/controllers/nodeversion_controller_test.go @@ -9,6 +9,7 @@ package controllers import ( "context" "errors" + "strconv" "sync" "testing" @@ -21,6 +22,7 @@ import ( mainconstants "github.com/edgelesssys/constellation/v2/internal/constants" updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/api/v1alpha1" + "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/internal/constants" ) func TestAnnotateNodes(t *testing.T) { @@ -800,16 +802,22 @@ func TestGroupNodes(t *testing.T) { assert.Equal(wantNodeGroups, groups) } +// stubNode contains all information usually associated with a node freshly +// created by the cloud provider. +type stubNode struct { + name string + providerID string +} + type stubNodeReplacer struct { sync.RWMutex - nodeImages map[string]string - scalingGroups map[string]string - createNodeName string - createProviderID string - nodeImageErr error - scalingGroupIDErr error - createErr error - deleteErr error + nodeImages map[string]string + scalingGroups map[string]string + createNodesByScalingGroupID map[string][]stubNode + nodeImageErr error + scalingGroupIDErr error + createErr error + deleteErr error } func (r *stubNodeReplacer) GetNodeImage(_ context.Context, providerID string) (string, error) { @@ -824,10 +832,25 @@ func (r *stubNodeReplacer) GetScalingGroupID(_ context.Context, providerID strin return r.scalingGroups[providerID], r.scalingGroupIDErr } -func (r *stubNodeReplacer) CreateNode(_ context.Context, _ string) (nodeName, providerID string, err error) { +// CreateNode stubs the cloud provider API call to create a node. 
+func (r *stubNodeReplacer) CreateNode(_ context.Context, scalingGroupID string) (nodeName, providerID string, err error) { r.RLock() defer r.RUnlock() - return r.createNodeName, r.createProviderID, r.createErr + nodes, ok := r.createNodesByScalingGroupID[scalingGroupID] + if !ok { + panic("unexpected call to CreateNode with scaling group ID " + scalingGroupID + ", existing scaling group IDs: " + strconv.Itoa(len(r.createNodesByScalingGroupID))) + } + if len(nodes) == 0 { + panic("unexpected call to CreateNode with scaling group ID " + scalingGroupID + ", no nodes left") + } + + nodeName = nodes[0].name + providerID = nodes[0].providerID + err = r.createErr + + r.createNodesByScalingGroupID[scalingGroupID] = nodes[1:] + + return } func (r *stubNodeReplacer) DeleteNode(_ context.Context, _ string) error { @@ -856,11 +879,13 @@ func (r *stubNodeReplacer) setScalingGroupID(providerID, scalingGroupID string) r.scalingGroups[providerID] = scalingGroupID } -func (r *stubNodeReplacer) setCreatedNode(nodeName, providerID string, err error) { +func (r *stubNodeReplacer) addCreatedNode(scalingGroupID, nodeName, providerID string, err error) { r.Lock() defer r.Unlock() - r.createNodeName = nodeName - r.createProviderID = providerID + if r.createNodesByScalingGroupID == nil { + r.createNodesByScalingGroupID = make(map[string][]stubNode) + } + r.createNodesByScalingGroupID[scalingGroupID] = append(r.createNodesByScalingGroupID[scalingGroupID], stubNode{nodeName, providerID}) r.createErr = err } @@ -869,8 +894,7 @@ func (r *stubNodeReplacer) reset() { defer r.Unlock() r.nodeImages = nil r.scalingGroups = nil - r.createNodeName = "" - r.createProviderID = "" + r.createNodesByScalingGroupID = nil r.createErr = nil r.deleteErr = nil } diff --git a/operators/constellation-node-operator/controllers/pendingnode_controller_env_test.go b/operators/constellation-node-operator/controllers/pendingnode_controller_env_test.go index 05e5de4eee..d9a6416d06 100644 --- a/operators/constellation-node-operator/controllers/pendingnode_controller_env_test.go +++ b/operators/constellation-node-operator/controllers/pendingnode_controller_env_test.go @@ -64,7 +64,7 @@ var _ = Describe("PendingNode controller", func() { Context("When creating pending node with goal join", func() { It("Should terminate the node after failing to join by the deadline", func() { By("setting the CSP node state to creating") - fakes.nodeStateGetter.setNodeState(updatev1alpha1.NodeStateCreating) + fakes.nodeStateGetter.setNodeState(pendingNodeName, updatev1alpha1.NodeStateCreating) By("creating a pending node resource") ctx := context.Background() @@ -77,7 +77,7 @@ var _ = Describe("PendingNode controller", func() { Name: pendingNodeName, }, Spec: updatev1alpha1.PendingNodeSpec{ - ProviderID: "provider-id", + ProviderID: pendingNodeName, ScalingGroupID: "scaling-group-id", NodeName: "test-node", Goal: updatev1alpha1.NodeGoalJoin, @@ -120,7 +120,7 @@ var _ = Describe("PendingNode controller", func() { }, timeout, interval).Should(Equal(updatev1alpha1.NodeGoalLeave)) By("setting the CSP node state to terminated") - fakes.nodeStateGetter.setNodeState(updatev1alpha1.NodeStateTerminated) + fakes.nodeStateGetter.setNodeState(pendingNodeName, updatev1alpha1.NodeStateTerminated) // trigger reconciliation before regular check interval to speed up test by changing the spec Eventually(func() error { if err := k8sClient.Get(ctx, pendingNodeLookupKey, pendingNode); err != nil { @@ -138,7 +138,7 @@ var _ = Describe("PendingNode controller", func() { It("Should 
should detect successful node join", func() { By("setting the CSP node state to creating") - fakes.nodeStateGetter.setNodeState(updatev1alpha1.NodeStateCreating) + fakes.nodeStateGetter.setNodeState(pendingNodeName, updatev1alpha1.NodeStateCreating) By("creating a pending node resource") ctx := context.Background() @@ -151,7 +151,7 @@ var _ = Describe("PendingNode controller", func() { Name: pendingNodeName, }, Spec: updatev1alpha1.PendingNodeSpec{ - ProviderID: "provider-id", + ProviderID: pendingNodeName, ScalingGroupID: "scaling-group-id", NodeName: "test-node", Goal: updatev1alpha1.NodeGoalJoin, @@ -178,7 +178,7 @@ var _ = Describe("PendingNode controller", func() { }, timeout, interval).Should(Equal(updatev1alpha1.NodeStateCreating)) By("setting the CSP node state to ready") - fakes.nodeStateGetter.setNodeState(updatev1alpha1.NodeStateReady) + fakes.nodeStateGetter.setNodeState(pendingNodeName, updatev1alpha1.NodeStateReady) By("creating a new node resource with the same node name and provider ID") node := &corev1.Node{ @@ -190,7 +190,7 @@ var _ = Describe("PendingNode controller", func() { Name: "test-node", }, Spec: corev1.NodeSpec{ - ProviderID: "provider-id", + ProviderID: pendingNodeName, }, } Expect(k8sClient.Create(ctx, node)).Should(Succeed()) diff --git a/operators/constellation-node-operator/controllers/pendingnode_controller_test.go b/operators/constellation-node-operator/controllers/pendingnode_controller_test.go index 1a564af768..d5ca55fe6d 100644 --- a/operators/constellation-node-operator/controllers/pendingnode_controller_test.go +++ b/operators/constellation-node-operator/controllers/pendingnode_controller_test.go @@ -231,15 +231,20 @@ func TestReachedGoal(t *testing.T) { type stubNodeStateGetter struct { sync.RWMutex - nodeState updatev1alpha1.CSPNodeState + nodeStates map[string]updatev1alpha1.CSPNodeState nodeStateErr error deleteNodeErr error } -func (g *stubNodeStateGetter) GetNodeState(_ context.Context, _ string) (updatev1alpha1.CSPNodeState, error) { +func (g *stubNodeStateGetter) GetNodeState(_ context.Context, providerID string) (updatev1alpha1.CSPNodeState, error) { g.RLock() defer g.RUnlock() - return g.nodeState, g.nodeStateErr + + if _, ok := g.nodeStates[providerID]; !ok { + panic("unexpected call to GetNodeState") + } + + return g.nodeStates[providerID], g.nodeStateErr } func (g *stubNodeStateGetter) DeleteNode(_ context.Context, _ string) error { @@ -250,8 +255,19 @@ func (g *stubNodeStateGetter) DeleteNode(_ context.Context, _ string) error { // thread safe methods to update the stub while in use -func (g *stubNodeStateGetter) setNodeState(nodeState updatev1alpha1.CSPNodeState) { +func (g *stubNodeStateGetter) setNodeState(providerID string, nodeState updatev1alpha1.CSPNodeState) { + g.Lock() + defer g.Unlock() + if g.nodeStates == nil { + g.nodeStates = make(map[string]updatev1alpha1.CSPNodeState) + } + g.nodeStates[providerID] = nodeState +} + +func (g *stubNodeStateGetter) reset() { g.Lock() defer g.Unlock() - g.nodeState = nodeState + g.nodeStates = nil + g.nodeStateErr = nil + g.deleteNodeErr = nil } diff --git a/operators/constellation-node-operator/controllers/suite_test.go b/operators/constellation-node-operator/controllers/suite_test.go index 7b4e232181..eafc162814 100644 --- a/operators/constellation-node-operator/controllers/suite_test.go +++ b/operators/constellation-node-operator/controllers/suite_test.go @@ -154,7 +154,7 @@ type fakeCollection struct { func (c *fakeCollection) reset() { c.scalingGroupUpdater.reset() - 
c.nodeStateGetter.setNodeState("") + c.nodeStateGetter.reset() c.nodeReplacer.reset() }