diff --git a/cmd/hubagent/workload/setup.go b/cmd/hubagent/workload/setup.go
index 8a582f561..e5c2d0527 100644
--- a/cmd/hubagent/workload/setup.go
+++ b/cmd/hubagent/workload/setup.go
@@ -28,6 +28,7 @@ import (
 	"go.goms.io/fleet/pkg/controllers/clusterinventory/clusterprofile"
 	"go.goms.io/fleet/pkg/controllers/clusterresourcebindingwatcher"
 	"go.goms.io/fleet/pkg/controllers/clusterresourceplacement"
+	"go.goms.io/fleet/pkg/controllers/clusterresourceplacementeviction"
 	"go.goms.io/fleet/pkg/controllers/clusterresourceplacementwatcher"
 	"go.goms.io/fleet/pkg/controllers/clusterschedulingpolicysnapshot"
 	"go.goms.io/fleet/pkg/controllers/memberclusterplacement"
@@ -206,6 +207,14 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager,
 		return err
 	}
 
+	klog.Info("Setting up cluster resource placement eviction controller")
+	if err := (&clusterresourceplacementeviction.Reconciler{
+		Client: mgr.GetClient(),
+	}).SetupWithManager(mgr); err != nil {
+		klog.ErrorS(err, "Unable to set up cluster resource placement eviction controller")
+		return err
+	}
+
 	// Set up the work generator
 	klog.Info("Setting up work generator")
 	if err := (&workgenerator.Reconciler{
diff --git a/pkg/controllers/clusterresourceplacementeviction/controller.go b/pkg/controllers/clusterresourceplacementeviction/controller.go
index da1d83c37..1ab40ffa7 100644
--- a/pkg/controllers/clusterresourceplacementeviction/controller.go
+++ b/pkg/controllers/clusterresourceplacementeviction/controller.go
@@ -28,27 +28,6 @@ import (
 	"go.goms.io/fleet/pkg/utils/defaulter"
 )
 
-const (
-	clusterResourcePlacementEvictionValidReason = "ClusterResourcePlacementEvictionValid"
-	clusterResourcePlacementEvictionInvalidReason = "ClusterResourcePlacementEvictionInvalid"
-	clusterResourcePlacementEvictionExecutedReason = "ClusterResourcePlacementEvictionExecuted"
-	clusterResourcePlacementEvictionNotExecutedReason = "ClusterResourcePlacementEvictionNotExecuted"
-
-	evictionInvalidMissingCRPMessage = "Failed to find ClusterResourcePlacement targeted by eviction"
-	evictionInvalidDeletingCRPMessage = "Found deleting ClusterResourcePlacement targeted by eviction"
-	evictionInvalidMissingCRBMessage = "Failed to find scheduler decision for placement in cluster targeted by eviction"
-	evictionInvalidMultipleCRBMessage = "Found more than one scheduler decision for placement in cluster targeted by eviction"
-	evictionValidMessage = "Eviction is valid"
-	evictionAllowedNoPDBMessage = "Eviction is allowed, no ClusterResourcePlacementDisruptionBudget specified"
-	evictionAllowedPlacementRemovedMessage = "Eviction is allowed, resources propagated by placement is currently being removed from cluster targeted by eviction"
-	evictionAllowedPlacementFailedMessage = "Eviction is allowed, placement has failed"
-	evictionBlockedMisconfiguredPDBSpecifiedMessage = "Eviction is blocked by misconfigured ClusterResourcePlacementDisruptionBudget, either MaxUnavailable is specified or MinAvailable is specified as a percentage for PickAll ClusterResourcePlacement"
-	evictionBlockedMissingPlacementMessage = "Eviction is blocked, placement has not propagated resources to target cluster yet"
-
-	evictionAllowedPDBSpecifiedFmt = "Eviction is allowed by specified ClusterResourcePlacementDisruptionBudget, availablePlacements: %d, totalPlacements: %d"
-	evictionBlockedPDBSpecifiedFmt = "Eviction is blocked by specified ClusterResourcePlacementDisruptionBudget, availablePlacements: %d, totalPlacements: %d"
-)
-
 // Reconciler reconciles a ClusterResourcePlacementEviction object.
 type Reconciler struct {
 	client.Client
@@ -97,8 +76,8 @@ func (r *Reconciler) validateEviction(ctx context.Context, eviction *placementv1
 	var crp placementv1beta1.ClusterResourcePlacement
 	if err := r.Client.Get(ctx, types.NamespacedName{Name: eviction.Spec.PlacementName}, &crp); err != nil {
 		if k8serrors.IsNotFound(err) {
-			klog.V(2).InfoS(evictionInvalidMissingCRPMessage, "clusterResourcePlacementEviction", eviction.Name, "clusterResourcePlacement", eviction.Spec.PlacementName)
-			markEvictionInvalid(eviction, evictionInvalidMissingCRPMessage)
+			klog.V(2).InfoS(condition.EvictionInvalidMissingCRPMessage, "clusterResourcePlacementEviction", eviction.Name, "clusterResourcePlacement", eviction.Spec.PlacementName)
+			markEvictionInvalid(eviction, condition.EvictionInvalidMissingCRPMessage)
 			return validationResult, nil
 		}
 		return nil, controller.NewAPIServerError(true, err)
@@ -108,8 +87,8 @@ func (r *Reconciler) validateEviction(ctx context.Context, eviction *placementv1
 	defaulter.SetDefaultsClusterResourcePlacement(&crp)
 	if crp.DeletionTimestamp != nil {
-		klog.V(2).InfoS(evictionInvalidDeletingCRPMessage, "clusterResourcePlacementEviction", eviction.Name, "clusterResourcePlacement", eviction.Spec.PlacementName)
-		markEvictionInvalid(eviction, evictionInvalidDeletingCRPMessage)
+		klog.V(2).InfoS(condition.EvictionInvalidDeletingCRPMessage, "clusterResourcePlacementEviction", eviction.Name, "clusterResourcePlacement", eviction.Spec.PlacementName)
+		markEvictionInvalid(eviction, condition.EvictionInvalidDeletingCRPMessage)
 		return validationResult, nil
 	}
 	validationResult.crp = &crp
@@ -126,15 +105,15 @@ func (r *Reconciler) validateEviction(ctx context.Context, eviction *placementv1
 			if evictionTargetBinding == nil {
 				evictionTargetBinding = &crbList.Items[i]
 			} else {
-				klog.V(2).InfoS(evictionInvalidMultipleCRBMessage, "clusterResourcePlacementEviction", eviction.Name, "clusterResourcePlacement", eviction.Spec.PlacementName)
-				markEvictionInvalid(eviction, evictionInvalidMultipleCRBMessage)
+				klog.V(2).InfoS(condition.EvictionInvalidMultipleCRBMessage, "clusterResourcePlacementEviction", eviction.Name, "clusterResourcePlacement", eviction.Spec.PlacementName)
+				markEvictionInvalid(eviction, condition.EvictionInvalidMultipleCRBMessage)
 				return validationResult, nil
 			}
 		}
 	}
 	if evictionTargetBinding == nil {
 		klog.V(2).InfoS("Failed to find cluster resource binding for cluster targeted by eviction", "clusterResourcePlacementEviction", eviction.Name, "targetCluster", eviction.Spec.ClusterName)
-		markEvictionInvalid(eviction, evictionInvalidMissingCRBMessage)
+		markEvictionInvalid(eviction, condition.EvictionInvalidMissingCRBMessage)
 		return validationResult, nil
 	}
 	validationResult.crb = evictionTargetBinding
@@ -179,14 +158,14 @@ func (r *Reconciler) executeEviction(ctx context.Context, validationResult *evic
 	if evictionTargetBinding.GetDeletionTimestamp() != nil {
 		klog.V(2).InfoS("ClusterResourceBinding targeted by eviction is being deleted", "clusterResourcePlacementEviction", eviction.Name, "clusterResourceBinding", evictionTargetBinding.Name, "targetCluster", eviction.Spec.ClusterName)
-		markEvictionExecuted(eviction, evictionAllowedPlacementRemovedMessage)
+		markEvictionExecuted(eviction, condition.EvictionAllowedPlacementRemovedMessage)
 		return nil
 	}
 
 	if !isPlacementPresent(evictionTargetBinding) {
 		klog.V(2).InfoS("No resources have been placed for ClusterResourceBinding in target cluster", "clusterResourcePlacementEviction", eviction.Name,
 			"clusterResourceBinding", evictionTargetBinding.Name, "targetCluster", eviction.Spec.ClusterName)
-		markEvictionNotExecuted(eviction, evictionBlockedMissingPlacementMessage)
+		markEvictionNotExecuted(eviction, condition.EvictionBlockedMissingPlacementMessage)
 		return nil
 	}
@@ -197,7 +176,7 @@ func (r *Reconciler) executeEviction(ctx context.Context, validationResult *evic
 		if err := r.deleteClusterResourceBinding(ctx, evictionTargetBinding); err != nil {
 			return err
 		}
-		markEvictionExecuted(eviction, evictionAllowedPlacementFailedMessage)
+		markEvictionExecuted(eviction, condition.EvictionAllowedPlacementFailedMessage)
 		return nil
 	}
@@ -207,7 +186,7 @@ func (r *Reconciler) executeEviction(ctx context.Context, validationResult *evic
 			if err = r.deleteClusterResourceBinding(ctx, evictionTargetBinding); err != nil {
 				return err
 			}
-			markEvictionExecuted(eviction, evictionAllowedNoPDBMessage)
+			markEvictionExecuted(eviction, condition.EvictionAllowedNoPDBMessage)
 			return nil
 		}
 		return controller.NewAPIServerError(true, err)
@@ -216,7 +195,7 @@ func (r *Reconciler) executeEviction(ctx context.Context, validationResult *evic
 	// handle special case for PickAll CRP.
 	if crp.Spec.Policy.PlacementType == placementv1beta1.PickAllPlacementType {
 		if db.Spec.MaxUnavailable != nil || (db.Spec.MinAvailable != nil && db.Spec.MinAvailable.Type == intstr.String) {
-			markEvictionNotExecuted(eviction, evictionBlockedMisconfiguredPDBSpecifiedMessage)
+			markEvictionNotExecuted(eviction, condition.EvictionBlockedMisconfiguredPDBSpecifiedMessage)
 			return nil
 		}
 	}
@@ -227,9 +206,9 @@ func (r *Reconciler) executeEviction(ctx context.Context, validationResult *evic
 		if err := r.deleteClusterResourceBinding(ctx, evictionTargetBinding); err != nil {
 			return err
 		}
-		markEvictionExecuted(eviction, fmt.Sprintf(evictionAllowedPDBSpecifiedFmt, availableBindings, totalBindings))
+		markEvictionExecuted(eviction, fmt.Sprintf(condition.EvictionAllowedPDBSpecifiedMessageFmt, availableBindings, totalBindings))
 	} else {
-		markEvictionNotExecuted(eviction, fmt.Sprintf(evictionBlockedPDBSpecifiedFmt, availableBindings, totalBindings))
+		markEvictionNotExecuted(eviction, fmt.Sprintf(condition.EvictionBlockedPDBSpecifiedMessageFmt, availableBindings, totalBindings))
 	}
 	return nil
 }
@@ -315,8 +294,8 @@ func markEvictionValid(eviction *placementv1alpha1.ClusterResourcePlacementEvict
 		Type: string(placementv1alpha1.PlacementEvictionConditionTypeValid),
 		Status: metav1.ConditionTrue,
 		ObservedGeneration: eviction.Generation,
-		Reason: clusterResourcePlacementEvictionValidReason,
-		Message: evictionValidMessage,
+		Reason: condition.ClusterResourcePlacementEvictionValidReason,
+		Message: condition.EvictionValidMessage,
 	}
 
 	eviction.SetConditions(cond)
@@ -329,7 +308,7 @@ func markEvictionInvalid(eviction *placementv1alpha1.ClusterResourcePlacementEvi
 		Type: string(placementv1alpha1.PlacementEvictionConditionTypeValid),
 		Status: metav1.ConditionFalse,
 		ObservedGeneration: eviction.Generation,
-		Reason: clusterResourcePlacementEvictionInvalidReason,
+		Reason: condition.ClusterResourcePlacementEvictionInvalidReason,
 		Message: message,
 	}
 	eviction.SetConditions(cond)
@@ -342,7 +321,7 @@ func markEvictionExecuted(eviction *placementv1alpha1.ClusterResourcePlacementEv
 		Type: string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 		Status: metav1.ConditionTrue,
 		ObservedGeneration: eviction.Generation,
-		Reason: clusterResourcePlacementEvictionExecutedReason,
+		Reason: condition.ClusterResourcePlacementEvictionExecutedReason,
 		Message: message,
 	}
 	eviction.SetConditions(cond)
@@ -355,7 +334,7 @@ func markEvictionNotExecuted(eviction *placementv1alpha1.ClusterResourcePlacemen
 		Type: string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 		Status: metav1.ConditionFalse,
 		ObservedGeneration: eviction.Generation,
-		Reason: clusterResourcePlacementEvictionNotExecutedReason,
+		Reason: condition.ClusterResourcePlacementEvictionNotExecutedReason,
 		Message: message,
 	}
 	eviction.SetConditions(cond)
diff --git a/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go b/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go
index d576a82b9..a91d5b877 100644
--- a/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go
+++ b/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go
@@ -9,8 +9,6 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/google/go-cmp/cmp"
-	"github.com/google/go-cmp/cmp/cmpopts"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
@@ -23,6 +21,8 @@ import (
 	placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1"
 	placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
+	"go.goms.io/fleet/pkg/utils/condition"
+	testutilseviction "go.goms.io/fleet/test/utils/eviction"
 )
 
 const (
@@ -32,18 +32,6 @@ const (
 	evictionNameTemplate = "eviction-%d"
 )
 
-var (
-	lessFuncCondition = func(a, b metav1.Condition) bool {
-		return a.Type < b.Type
-	}
-
-	evictionStatusCmpOptions = cmp.Options{
-		cmpopts.SortSlices(lessFuncCondition),
-		cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime"),
-		cmpopts.EquateEmpty(),
-	}
-)
-
 const (
 	eventuallyDuration = time.Minute * 2
 	eventuallyInterval = time.Millisecond * 250
@@ -121,7 +109,10 @@ var _ = Describe("Test ClusterResourcePlacementEviction Controller", func() {
 		})
 
 		By("Check eviction status", func() {
-			evictionStatusUpdatedActual := evictionStatusUpdatedActual(&isValidEviction{bool: true, msg: evictionValidMessage}, &isExecutedEviction{bool: false, msg: fmt.Sprintf(evictionBlockedPDBSpecifiedFmt, 0, 1)})
+			evictionStatusUpdatedActual := testutilseviction.StatusUpdatedActual(
+				ctx, k8sClient, evictionName,
+				&testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage},
+				&testutilseviction.IsExecutedEviction{IsExecuted: false, Msg: fmt.Sprintf(condition.EvictionBlockedPDBSpecifiedMessageFmt, 0, 1)})
 			Eventually(evictionStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed())
 		})
@@ -211,7 +202,10 @@ var _ = Describe("Test ClusterResourcePlacementEviction Controller", func() {
 		})
 
 		By("Check eviction status", func() {
-			evictionStatusUpdatedActual := evictionStatusUpdatedActual(&isValidEviction{bool: true, msg: evictionValidMessage}, &isExecutedEviction{bool: true, msg: fmt.Sprintf(evictionAllowedPDBSpecifiedFmt, 1, 1)})
+			evictionStatusUpdatedActual := testutilseviction.StatusUpdatedActual(
+				ctx, k8sClient, evictionName,
+				&testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage},
+				&testutilseviction.IsExecutedEviction{IsExecuted: true, Msg: fmt.Sprintf(condition.EvictionAllowedPDBSpecifiedMessageFmt, 1, 1)})
 			Eventually(evictionStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed())
 		})
@@ -286,7 +280,10 @@ var _ = Describe("Test ClusterResourcePlacementEviction Controller", func() {
 		})
 
 		By("Check eviction status", func() {
-			evictionStatusUpdatedActual := evictionStatusUpdatedActual(&isValidEviction{bool: true, msg: evictionValidMessage}, &isExecutedEviction{bool: false, msg: fmt.Sprintf(evictionBlockedPDBSpecifiedFmt, 0, 1)})
+			evictionStatusUpdatedActual := testutilseviction.StatusUpdatedActual(
+				ctx, k8sClient, evictionName,
+				&testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage},
+				&testutilseviction.IsExecutedEviction{IsExecuted: false, Msg: fmt.Sprintf(condition.EvictionBlockedPDBSpecifiedMessageFmt, 0, 1)})
 			Eventually(evictionStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed())
 		})
@@ -399,7 +396,10 @@ var _ = Describe("Test ClusterResourcePlacementEviction Controller", func() {
 		})
 
 		By("Check eviction status", func() {
-			evictionStatusUpdatedActual := evictionStatusUpdatedActual(&isValidEviction{bool: true, msg: evictionValidMessage}, &isExecutedEviction{bool: true, msg: fmt.Sprintf(evictionAllowedPDBSpecifiedFmt, 2, 2)})
+			evictionStatusUpdatedActual := testutilseviction.StatusUpdatedActual(
+				ctx, k8sClient, evictionName,
+				&testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage},
+				&testutilseviction.IsExecutedEviction{IsExecuted: true, Msg: fmt.Sprintf(condition.EvictionAllowedPDBSpecifiedMessageFmt, 2, 2)})
 			Eventually(evictionStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed())
 		})
@@ -423,66 +423,6 @@ var _ = Describe("Test ClusterResourcePlacementEviction Controller", func() {
 	})
 })
 
-func evictionStatusUpdatedActual(isValid *isValidEviction, isExecuted *isExecutedEviction) func() error {
-	evictionName := fmt.Sprintf(evictionNameTemplate, GinkgoParallelProcess())
-	return func() error {
-		var eviction placementv1alpha1.ClusterResourcePlacementEviction
-		if err := k8sClient.Get(ctx, types.NamespacedName{Name: evictionName}, &eviction); err != nil {
-			return err
-		}
-		var conditions []metav1.Condition
-		if isValid != nil {
-			if isValid.bool {
-				validCondition := metav1.Condition{
-					Type: string(placementv1alpha1.PlacementEvictionConditionTypeValid),
-					Status: metav1.ConditionTrue,
-					ObservedGeneration: eviction.GetGeneration(),
-					Reason: clusterResourcePlacementEvictionValidReason,
-					Message: isValid.msg,
-				}
-				conditions = append(conditions, validCondition)
-			} else {
-				invalidCondition := metav1.Condition{
-					Type: string(placementv1alpha1.PlacementEvictionConditionTypeValid),
-					Status: metav1.ConditionFalse,
-					ObservedGeneration: eviction.GetGeneration(),
-					Reason: clusterResourcePlacementEvictionInvalidReason,
-					Message: isValid.msg,
-				}
-				conditions = append(conditions, invalidCondition)
-			}
-		}
-		if isExecuted != nil {
-			if isExecuted.bool {
-				executedCondition := metav1.Condition{
-					Type: string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
-					Status: metav1.ConditionTrue,
-					ObservedGeneration: eviction.GetGeneration(),
-					Reason: clusterResourcePlacementEvictionExecutedReason,
-					Message: isExecuted.msg,
-				}
-				conditions = append(conditions, executedCondition)
-			} else {
-				notExecutedCondition := metav1.Condition{
-					Type: string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
-					Status: metav1.ConditionFalse,
-					ObservedGeneration: eviction.GetGeneration(),
-					Reason: clusterResourcePlacementEvictionNotExecutedReason,
-					Message: isExecuted.msg,
-				}
-				conditions = append(conditions, notExecutedCondition)
-			}
-		}
-		wantStatus := placementv1alpha1.PlacementEvictionStatus{
-			Conditions: conditions,
-		}
-		if diff := cmp.Diff(eviction.Status, wantStatus, evictionStatusCmpOptions...); diff != "" {
-			return fmt.Errorf("CRP status diff (-got, +want): %s", diff)
-		}
-		return nil
-	}
-}
-
 func buildTestPickNCRP(crpName string, clusterCount int32) placementv1beta1.ClusterResourcePlacement {
 	return placementv1beta1.ClusterResourcePlacement{
 		ObjectMeta: metav1.ObjectMeta{
@@ -597,13 +537,3 @@ func ensureAllBindingsAreRemoved(crpName string) {
 		ensureCRBRemoved(bindingList.Items[i].Name)
 	}
 }
-
-type isValidEviction struct {
-	bool
-	msg string
-}
-
-type isExecutedEviction struct {
-	bool
-	msg string
-}
diff --git a/pkg/controllers/clusterresourceplacementeviction/controller_test.go b/pkg/controllers/clusterresourceplacementeviction/controller_test.go
index 6506a921c..333e6670a 100644
--- a/pkg/controllers/clusterresourceplacementeviction/controller_test.go
+++ b/pkg/controllers/clusterresourceplacementeviction/controller_test.go
@@ -22,6 +22,7 @@ import (
 
 	placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1"
 	placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
+	"go.goms.io/fleet/pkg/utils/condition"
 	"go.goms.io/fleet/pkg/utils/defaulter"
 )
@@ -102,8 +103,8 @@ func TestValidateEviction(t *testing.T) {
 				Type: string(placementv1alpha1.PlacementEvictionConditionTypeValid),
 				Status: metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason: clusterResourcePlacementEvictionInvalidReason,
-				Message: evictionInvalidMissingCRPMessage,
+				Reason: condition.ClusterResourcePlacementEvictionInvalidReason,
+				Message: condition.EvictionInvalidMissingCRPMessage,
 			},
 			wantErr: nil,
 		},
@@ -129,8 +130,8 @@ func TestValidateEviction(t *testing.T) {
 				Type: string(placementv1alpha1.PlacementEvictionConditionTypeValid),
 				Status: metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason: clusterResourcePlacementEvictionInvalidReason,
-				Message: evictionInvalidDeletingCRPMessage,
+				Reason: condition.ClusterResourcePlacementEvictionInvalidReason,
+				Message: condition.EvictionInvalidDeletingCRPMessage,
 			},
 			wantErr: nil,
 		},
@@ -150,8 +151,8 @@ func TestValidateEviction(t *testing.T) {
 				Type: string(placementv1alpha1.PlacementEvictionConditionTypeValid),
 				Status: metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason: clusterResourcePlacementEvictionInvalidReason,
-				Message: evictionInvalidMultipleCRBMessage,
+				Reason: condition.ClusterResourcePlacementEvictionInvalidReason,
+				Message: condition.EvictionInvalidMultipleCRBMessage,
 			},
 			wantErr: nil,
 		},
@@ -168,8 +169,8 @@ func TestValidateEviction(t *testing.T) {
 				Type: string(placementv1alpha1.PlacementEvictionConditionTypeValid),
 				Status: metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason: clusterResourcePlacementEvictionInvalidReason,
-				Message: evictionInvalidMissingCRBMessage,
+				Reason: condition.ClusterResourcePlacementEvictionInvalidReason,
+				Message: condition.EvictionInvalidMissingCRBMessage,
 			},
 			wantErr: nil,
 		},
@@ -358,8 +359,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type: string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status: metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason: clusterResourcePlacementEvictionNotExecutedReason,
-				Message: evictionBlockedMissingPlacementMessage,
+				Reason: condition.ClusterResourcePlacementEvictionNotExecutedReason,
+				Message: condition.EvictionBlockedMissingPlacementMessage,
 			},
 			wantErr: nil,
 		},
@@ -380,8 +381,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type: string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status: metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason: clusterResourcePlacementEvictionNotExecutedReason,
-				Message: evictionBlockedMissingPlacementMessage,
+				Reason: condition.ClusterResourcePlacementEvictionNotExecutedReason,
+				Message: condition.EvictionBlockedMissingPlacementMessage,
 			},
 			wantErr: nil,
 		},
@@ -403,8 +404,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type: string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status: metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason: clusterResourcePlacementEvictionNotExecutedReason,
-				Message: evictionBlockedMissingPlacementMessage,
+				Reason: condition.ClusterResourcePlacementEvictionNotExecutedReason,
+				Message: condition.EvictionBlockedMissingPlacementMessage,
 			},
 			wantErr: nil,
 		},
@@ -428,8 +429,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type: string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status: metav1.ConditionTrue,
 				ObservedGeneration: 1,
-				Reason: clusterResourcePlacementEvictionExecutedReason,
-				Message: evictionAllowedPlacementRemovedMessage,
+				Reason: condition.ClusterResourcePlacementEvictionExecutedReason,
+				Message: condition.EvictionAllowedPlacementRemovedMessage,
 			},
 			wantErr: nil,
 		},
@@ -461,8 +462,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type: string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status: metav1.ConditionTrue,
 				ObservedGeneration: 1,
-				Reason: clusterResourcePlacementEvictionExecutedReason,
-				Message: evictionAllowedPlacementFailedMessage,
+				Reason: condition.ClusterResourcePlacementEvictionExecutedReason,
+				Message: condition.EvictionAllowedPlacementFailedMessage,
 			},
 			wantErr: nil,
 		},
@@ -500,8 +501,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type: string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status: metav1.ConditionTrue,
 				ObservedGeneration: 1,
-				Reason: clusterResourcePlacementEvictionExecutedReason,
-				Message: evictionAllowedPlacementFailedMessage,
+				Reason: condition.ClusterResourcePlacementEvictionExecutedReason,
+				Message: condition.EvictionAllowedPlacementFailedMessage,
 			},
 			wantErr: nil,
 		},
@@ -520,8 +521,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type: string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status: metav1.ConditionTrue,
 				ObservedGeneration: 1,
-				Reason: clusterResourcePlacementEvictionExecutedReason,
-				Message: evictionAllowedNoPDBMessage,
+				Reason: condition.ClusterResourcePlacementEvictionExecutedReason,
+				Message: condition.EvictionAllowedNoPDBMessage,
 			},
 			wantErr: nil,
 		},
@@ -547,8 +548,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type: string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status: metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason: clusterResourcePlacementEvictionNotExecutedReason,
-				Message: evictionBlockedMisconfiguredPDBSpecifiedMessage,
+				Reason: condition.ClusterResourcePlacementEvictionNotExecutedReason,
+				Message: condition.EvictionBlockedMisconfiguredPDBSpecifiedMessage,
 			},
 			wantErr: nil,
 		},
@@ -574,8 +575,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type: string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status: metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason: clusterResourcePlacementEvictionNotExecutedReason,
-				Message: evictionBlockedMisconfiguredPDBSpecifiedMessage,
+				Reason: condition.ClusterResourcePlacementEvictionNotExecutedReason,
+				Message: condition.EvictionBlockedMisconfiguredPDBSpecifiedMessage,
 			},
 			wantErr: nil,
 		},
diff --git a/pkg/controllers/rollout/controller.go b/pkg/controllers/rollout/controller.go
index e66f4924c..8828d8131 100644
--- a/pkg/controllers/rollout/controller.go
+++ b/pkg/controllers/rollout/controller.go
@@ -381,7 +381,8 @@ func (r *Reconciler) pickBindingsToRoll(ctx context.Context, allBindings []*flee
 		case fleetv1beta1.BindingStateBound:
 			bindingFailed := false
 			schedulerTargetedBinds = append(schedulerTargetedBinds, binding)
-			if waitTime, bindingReady := isBindingReady(binding, readyTimeCutOff); bindingReady {
+			waitTime, bindingReady := isBindingReady(binding, readyTimeCutOff)
+			if bindingReady {
 				klog.V(3).InfoS("Found a ready bound binding", "clusterResourcePlacement", crpKObj, "binding", bindingKObj)
 				readyBindings = append(readyBindings, binding)
 			} else {
@@ -397,20 +398,27 @@ func (r *Reconciler) pickBindingsToRoll(ctx context.Context, allBindings []*flee
 			} else {
 				canBeReadyBindings = append(canBeReadyBindings, binding)
 			}
-			// PickFromResourceMatchedOverridesForTargetCluster always returns the ordered list of the overrides.
-			cro, ro, err := overrider.PickFromResourceMatchedOverridesForTargetCluster(ctx, r.Client, binding.Spec.TargetCluster, matchedCROs, matchedROs)
-			if err != nil {
-				return nil, nil, false, 0, err
-			}
-			// The binding needs update if it's not pointing to the latest resource resourceBinding or the overrides.
-			if binding.Spec.ResourceSnapshotName != latestResourceSnapshot.Name || !equality.Semantic.DeepEqual(binding.Spec.ClusterResourceOverrideSnapshots, cro) || !equality.Semantic.DeepEqual(binding.Spec.ResourceOverrideSnapshots, ro) {
-				updateInfo := createUpdateInfo(binding, crp, latestResourceSnapshot, cro, ro)
-				if bindingFailed {
-					// the binding has been applied but failed to apply, we can safely update it to latest resources without affecting max unavailable count
-					applyFailedUpdateCandidates = append(applyFailedUpdateCandidates, updateInfo)
-				} else {
-					updateCandidates = append(updateCandidates, updateInfo)
+
+			// check to see if the binding is not being deleted.
+			if binding.DeletionTimestamp.IsZero() {
+				// PickFromResourceMatchedOverridesForTargetCluster always returns the ordered list of the overrides.
+				cro, ro, err := overrider.PickFromResourceMatchedOverridesForTargetCluster(ctx, r.Client, binding.Spec.TargetCluster, matchedCROs, matchedROs)
+				if err != nil {
+					return nil, nil, false, 0, err
+				}
+				// The binding needs an update if it's not pointing to the latest resource snapshot or the latest overrides.
+				if binding.Spec.ResourceSnapshotName != latestResourceSnapshot.Name || !equality.Semantic.DeepEqual(binding.Spec.ClusterResourceOverrideSnapshots, cro) || !equality.Semantic.DeepEqual(binding.Spec.ResourceOverrideSnapshots, ro) {
+					updateInfo := createUpdateInfo(binding, crp, latestResourceSnapshot, cro, ro)
+					if bindingFailed {
+						// the binding has been applied but failed to apply, we can safely update it to latest resources without affecting max unavailable count
+						applyFailedUpdateCandidates = append(applyFailedUpdateCandidates, updateInfo)
+					} else {
+						updateCandidates = append(updateCandidates, updateInfo)
+					}
 				}
+			} else if bindingReady {
+				// it is being deleted; it can be removed from the cluster at any time, so it can be unavailable at any time
+				canBeUnavailableBindings = append(canBeUnavailableBindings, binding)
 			}
 		}
 	}
diff --git a/pkg/controllers/rollout/controller_integration_test.go b/pkg/controllers/rollout/controller_integration_test.go
index 57bd67959..5baff7550 100644
--- a/pkg/controllers/rollout/controller_integration_test.go
+++ b/pkg/controllers/rollout/controller_integration_test.go
@@ -12,12 +12,12 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-	"k8s.io/utils/ptr"
-
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/utils/ptr"
 
 	fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
 	"go.goms.io/fleet/pkg/controllers/work"
@@ -64,7 +66,9 @@ var _ = Describe("Test the rollout Controller", func() {
 	It("Should rollout all the selected bindings as soon as they are created", func() {
 		// create CRP
 		var targetCluster int32 = 10
-		rolloutCRP = clusterResourcePlacementForTest(testCRPName, createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster))
+		rolloutCRP = clusterResourcePlacementForTest(testCRPName,
+			createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster),
+			createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil))
 		Expect(k8sClient.Create(ctx, rolloutCRP)).Should(Succeed())
 		// create master resource snapshot that is latest
 		masterSnapshot := generateResourceSnapshot(rolloutCRP.Name, 0, true)
@@ -97,7 +99,9 @@ var _ = Describe("Test the rollout Controller", func() {
 	It("Should rollout all the selected bindings when the rollout strategy is not set", func() {
 		// create CRP
 		var targetCluster int32 = 11
-		rolloutCRP = clusterResourcePlacementForTest(testCRPName, createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster))
+		rolloutCRP = clusterResourcePlacementForTest(testCRPName,
+			createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster),
+			createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil))
 		// remove the strategy
 		rolloutCRP.Spec.Strategy = fleetv1beta1.RolloutStrategy{}
 		Expect(k8sClient.Create(ctx, rolloutCRP)).Should(Succeed())
@@ -132,7 +136,9 @@ var _ = Describe("Test the rollout Controller", func() {
 	It("Should rollout the selected and unselected bindings (not trackable resources)", func() {
 		// create CRP
 		var targetCluster int32 = 11
-		rolloutCRP = clusterResourcePlacementForTest(testCRPName, createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster))
+		rolloutCRP = clusterResourcePlacementForTest(testCRPName,
+			createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster),
+			createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil))
 		Expect(k8sClient.Create(ctx, rolloutCRP)).Should(Succeed())
 		// create master resource snapshot that is latest
 		masterSnapshot := generateResourceSnapshot(rolloutCRP.Name, 0, true)
@@ -230,7 +236,9 @@ var _ = Describe("Test the rollout Controller", func() {
 	It("Should rollout both the new scheduling and the new resources (trackable)", func() {
 		// create CRP
 		var targetCluster int32 = 11
-		rolloutCRP = clusterResourcePlacementForTest(testCRPName, createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster))
+		rolloutCRP = clusterResourcePlacementForTest(testCRPName,
+			createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster),
+			createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil))
 		Expect(k8sClient.Create(ctx, rolloutCRP)).Should(Succeed())
 		// create master resource snapshot that is latest
 		masterSnapshot := generateResourceSnapshot(rolloutCRP.Name, 0, true)
@@ -358,7 +366,9 @@ var _ = Describe("Test the rollout Controller", func() {
 	It("Should wait for deleting binding delete before we rollout", func() {
 		// create CRP
 		var targetCluster int32 = 5
-		rolloutCRP = clusterResourcePlacementForTest(testCRPName, createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster))
+		rolloutCRP = clusterResourcePlacementForTest(testCRPName,
+			createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster),
+			createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil))
 		Expect(k8sClient.Create(ctx, rolloutCRP)).Should(Succeed())
 		// create master resource snapshot that is latest
 		latestSnapshot := generateResourceSnapshot(rolloutCRP.Name, 1, true)
@@ -459,7 +469,9 @@ var _ = Describe("Test the rollout Controller", func() {
 	It("Should rollout both the old applied and failed to apply bond the new resources", func() {
 		// create CRP
 		var targetCluster int32 = 5
-		rolloutCRP = clusterResourcePlacementForTest(testCRPName, createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster))
+		rolloutCRP = clusterResourcePlacementForTest(testCRPName,
+			createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster),
+			createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil))
 		Expect(k8sClient.Create(ctx, rolloutCRP)).Should(Succeed())
 		// create master resource snapshot that is latest
 		masterSnapshot := generateResourceSnapshot(rolloutCRP.Name, 0, true)
@@ -526,7 +538,9 @@ var _ = Describe("Test the rollout Controller", func() {
 	It("Should wait designated time before rolling out ", func() {
 		// create CRP
 		var targetCluster int32 = 11
-		rolloutCRP = clusterResourcePlacementForTest(testCRPName, createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster))
+		rolloutCRP = clusterResourcePlacementForTest(testCRPName,
+			createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster),
+			createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil))
 		// remove the strategy
 		rolloutCRP.Spec.Strategy = fleetv1beta1.RolloutStrategy{RollingUpdate: &fleetv1beta1.RollingUpdateConfig{UnavailablePeriodSeconds: ptr.To(60)}}
 		Expect(k8sClient.Create(ctx, rolloutCRP)).Should(Succeed())
@@ -607,6 +621,206 @@ var _ = Describe("Test the rollout Controller", func() {
 		}, 5*time.Minute, interval).Should(BeTrue(), "rollout controller should roll all the bindings to use the latest resource snapshot")
 	})
 
+	It("Rollout should be blocked, then unblocked by eviction - evict unscheduled binding", func() {
+		// create CRP
+		var targetCluster int32 = 2
+		rolloutCRP = clusterResourcePlacementForTest(testCRPName,
+			createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster),
+			createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil))
+		// Set MaxSurge to 0.
+		rolloutCRP.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{
+			Type: intstr.Int,
+			IntVal: 0,
+		}
+		Expect(k8sClient.Create(ctx, rolloutCRP)).Should(Succeed())
+		// create master resource snapshot that is latest.
+		masterSnapshot := generateResourceSnapshot(rolloutCRP.Name, 0, true)
+		Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed())
+
+		// create scheduled bindings for master snapshot on target clusters
+		clusters := make([]string, targetCluster)
+		for i := 0; i < int(targetCluster); i++ {
+			clusters[i] = "cluster-" + utils.RandStr()
+			binding := generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, masterSnapshot.Name, clusters[i])
+			Expect(k8sClient.Create(ctx, binding)).Should(Succeed())
+			By(fmt.Sprintf("resource binding %s created", binding.Name))
+			bindings = append(bindings, binding)
+		}
+
+		// check that all bindings are bound.
+		Eventually(func() bool {
+			for _, binding := range bindings {
+				err := k8sClient.Get(ctx, types.NamespacedName{Name: binding.GetName()}, binding)
+				if err != nil {
+					return false
+				}
+				if binding.Spec.State != fleetv1beta1.BindingStateBound || binding.Spec.ResourceSnapshotName != masterSnapshot.Name {
+					return false
+				}
+			}
+			return true
+		}, timeout, interval).Should(BeTrue(), "rollout controller should roll all the bindings to Bound state")
+
+		// mark one binding as ready i.e. applied and available.
+		availableBinding := 1
+		for i := 0; i < availableBinding; i++ {
+			markBindingApplied(bindings[i], true)
+			markBindingAvailable(bindings[i], false)
+		}
+		// Current state: one ready binding and one canBeReadyBinding.
+		// create a new scheduled binding.
+		cluster3 = "cluster-" + utils.RandStr()
+		newScheduledBinding := generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, masterSnapshot.Name, cluster3)
+		Expect(k8sClient.Create(ctx, newScheduledBinding)).Should(Succeed())
+		By(fmt.Sprintf("resource binding %s created", newScheduledBinding.Name))
+		// add new scheduled binding to list of bindings.
+		bindings = append(bindings, newScheduledBinding)
+
+		// ensure new binding exists.
+		Eventually(func() bool {
+			return !apierrors.IsNotFound(k8sClient.Get(ctx, types.NamespacedName{Name: newScheduledBinding.Name}, newScheduledBinding))
+		}, timeout, interval).Should(BeTrue(), "new scheduled binding is not found")
+
+		// check if new scheduled binding is not bound.
+		Consistently(func() error {
+			err := k8sClient.Get(ctx, types.NamespacedName{Name: newScheduledBinding.Name}, newScheduledBinding)
+			if err != nil {
+				return err
+			}
+			if newScheduledBinding.Spec.State == fleetv1beta1.BindingStateBound {
+				return fmt.Errorf("binding %s is in bound state, which is unexpected", newScheduledBinding.Name)
+			}
+			return nil
+		}, timeout, interval).Should(BeNil(), "rollout controller shouldn't roll new scheduled binding to bound state")
+
+		// Current state: rollout is blocked by maxSurge being 0.
+		// mark first available bound binding as unscheduled and ensure it's not removed.
+		unscheduledBinding := 1
+		for i := 0; i < unscheduledBinding; i++ {
+			Eventually(func() error {
+				err := k8sClient.Get(ctx, types.NamespacedName{Name: bindings[i].Name}, bindings[i])
+				if err != nil {
+					return err
+				}
+				bindings[i].Spec.State = fleetv1beta1.BindingStateUnscheduled
+				return k8sClient.Update(ctx, bindings[i])
+			}, timeout, interval).Should(BeNil(), "failed to update binding spec to unscheduled")
+
+			// Ensure unscheduled binding is not removed.
+			Consistently(func() bool {
+				return !apierrors.IsNotFound(k8sClient.Get(ctx, types.NamespacedName{Name: bindings[i].Name}, bindings[i]))
+			}, timeout, interval).Should(BeTrue(), "rollout controller doesn't remove unscheduled binding")
+		}
+
+		// simulate eviction by deleting unscheduled binding.
+		for i := 0; i < unscheduledBinding; i++ {
+			Expect(k8sClient.Delete(ctx, bindings[i])).Should(Succeed())
+		}
+
+		// check to see if rollout is unblocked due to eviction.
+		for i := unscheduledBinding; i < len(bindings); i++ {
+			Eventually(func() bool {
+				err := k8sClient.Get(ctx, types.NamespacedName{Name: bindings[i].GetName()}, bindings[i])
+				if err != nil {
+					return false
+				}
+				if bindings[i].Spec.State != fleetv1beta1.BindingStateBound || bindings[i].Spec.ResourceSnapshotName != masterSnapshot.Name {
+					return false
+				}
+				return true
+			}, timeout, interval).Should(BeTrue(), "rollout controller should roll all remaining bindings to Bound state")
+		}
+	})
+
+	It("Rollout should be blocked, then unblocked by eviction - evict bound binding", func() {
+		// create CRP
+		var targetCluster int32 = 2
+		rolloutCRP = clusterResourcePlacementForTest(testCRPName,
+			createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster),
+			createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil))
+		// Set MaxSurge to 0.
+		rolloutCRP.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{
+			Type: intstr.Int,
+			IntVal: 0,
+		}
+		Expect(k8sClient.Create(ctx, rolloutCRP)).Should(Succeed())
+		// create master resource snapshot that is latest.
+		masterSnapshot := generateResourceSnapshot(rolloutCRP.Name, 0, true)
+		Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed())
+
+		// create scheduled bindings for master snapshot on target clusters
+		clusters := make([]string, targetCluster)
+		for i := 0; i < int(targetCluster); i++ {
+			clusters[i] = "cluster-" + utils.RandStr()
+			binding := generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, masterSnapshot.Name, clusters[i])
+			Expect(k8sClient.Create(ctx, binding)).Should(Succeed())
+			By(fmt.Sprintf("resource binding %s created", binding.Name))
+			bindings = append(bindings, binding)
+		}
+
+		// check that all bindings are bound.
+		Eventually(func() bool {
+			for _, binding := range bindings {
+				err := k8sClient.Get(ctx, types.NamespacedName{Name: binding.GetName()}, binding)
+				if err != nil {
+					return false
+				}
+				if binding.Spec.State != fleetv1beta1.BindingStateBound || binding.Spec.ResourceSnapshotName != masterSnapshot.Name {
+					return false
+				}
+			}
+			return true
+		}, timeout, interval).Should(BeTrue(), "rollout controller should roll all the bindings to Bound state")
+
+		// Note: This scenario is very unlikely in production; the user has to change the target from 2->3->2,
+		// where the scheduler created a new scheduled binding but the user changed the target number from 3->2 again before the rollout controller reads the CRP.
+		// create a new scheduled binding.
+		cluster3 = "cluster-" + utils.RandStr()
+		newScheduledBinding := generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, masterSnapshot.Name, cluster3)
+		Expect(k8sClient.Create(ctx, newScheduledBinding)).Should(Succeed())
+		By(fmt.Sprintf("resource binding %s created", newScheduledBinding.Name))
+		// add new scheduled binding to list of bindings.
+		bindings = append(bindings, newScheduledBinding)
+
+		// ensure new binding exists.
+		Eventually(func() bool {
+			return !apierrors.IsNotFound(k8sClient.Get(ctx, types.NamespacedName{Name: newScheduledBinding.Name}, newScheduledBinding))
+		}, timeout, interval).Should(BeTrue(), "new scheduled binding is not found")
+
+		// Current state: rollout is blocked by maxSurge being 0.
+		// check if new scheduled binding is not bound.
+		Consistently(func() error {
+			err := k8sClient.Get(ctx, types.NamespacedName{Name: newScheduledBinding.Name}, newScheduledBinding)
+			if err != nil {
+				return err
+			}
+			if newScheduledBinding.Spec.State == fleetv1beta1.BindingStateBound {
+				return fmt.Errorf("binding %s is in bound state, which is unexpected", newScheduledBinding.Name)
+			}
+			return nil
+		}, timeout, interval).Should(BeNil(), "rollout controller shouldn't roll new scheduled binding to bound state")
+
+		// simulate eviction by deleting first bound binding.
+		firstBoundBinding := 1
+		for i := 0; i < firstBoundBinding; i++ {
+			Expect(k8sClient.Delete(ctx, bindings[i])).Should(Succeed())
+		}
+
+		// check to see if the remaining two bindings are bound.
+		for i := firstBoundBinding; i < len(bindings); i++ {
+			Eventually(func() bool {
+				err := k8sClient.Get(ctx, types.NamespacedName{Name: bindings[i].GetName()}, bindings[i])
+				if err != nil {
+					return false
+				}
+				if bindings[i].Spec.State != fleetv1beta1.BindingStateBound || bindings[i].Spec.ResourceSnapshotName != masterSnapshot.Name {
+					return false
+				}
+				return true
+			}, timeout, interval).Should(BeTrue(), "rollout controller should roll all remaining bindings to Bound state")
+		}
+	})
+
 	// TODO: should update scheduled bindings to the latest snapshot when it is updated to bound state.
 
 	// TODO: should count the deleting bindings as can be Unavailable.
@@ -707,11 +921,3 @@ func generateResourceSnapshot(testCRPName string, resourceIndex int, isLatest bo
 	}
 	return clusterResourceSnapshot
 }
-
-func generateDeletingClusterResourceBinding(targetCluster string) *fleetv1beta1.ClusterResourceBinding {
-	binding := generateClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "anything", targetCluster)
-	binding.DeletionTimestamp = &metav1.Time{
-		Time: now,
-	}
-	return binding
-}
diff --git a/pkg/controllers/rollout/controller_test.go b/pkg/controllers/rollout/controller_test.go
index 68188d4cd..06c0ac07e 100644
--- a/pkg/controllers/rollout/controller_test.go
+++ b/pkg/controllers/rollout/controller_test.go
@@ -39,6 +39,7 @@ var (
 	cluster3 = "cluster-3"
 	cluster4 = "cluster-4"
 	cluster5 = "cluster-5"
+	cluster6 = "cluster-6"
 
 	cmpOptions = []cmp.Option{
 		cmp.AllowUnexported(toBeUpdatedBinding{}),
@@ -205,7 +206,7 @@ func TestWaitForResourcesToCleanUp(t *testing.T) {
 		"test deleting binding block schedule binding on the same cluster": {
 			allBindings: []*fleetv1beta1.ClusterResourceBinding{
 				generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster1),
-				generateDeletingClusterResourceBinding(cluster1),
+				setDeletionTimeStampForBinding(generateClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster1)),
 			},
 			wantWait: true,
 			wantErr: false,
@@ -213,7 +214,7 @@ func TestWaitForResourcesToCleanUp(t *testing.T) {
 		"test deleting binding not block binding on different cluster": {
 			allBindings: []*fleetv1beta1.ClusterResourceBinding{
 				generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster1),
-				generateDeletingClusterResourceBinding(cluster2),
+				setDeletionTimeStampForBinding(generateClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster2)),
 				generateClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster3),
 			},
 			wantWait: false,
 			wantErr: false,
 		},
 		"test deleting binding cannot co-exsit with a bound binding on same cluster": {
 			allBindings: []*fleetv1beta1.ClusterResourceBinding{
-				generateDeletingClusterResourceBinding(cluster1),
+				setDeletionTimeStampForBinding(generateClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster1)),
 				generateClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1),
 			},
 			wantWait: false,
@@ -742,114 +743,6 @@
 }
 
 func TestPickBindingsToRoll(t *testing.T) {
-	noMaxUnavailableCRP := clusterResourcePlacementForTest("test",
-		createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 5))
-	noMaxUnavailableCRP.Spec.Strategy.RollingUpdate.MaxUnavailable = &intstr.IntOrString{
-		Type: intstr.Int,
-		IntVal: 0,
-	}
-	noMaxSurgeCRP := clusterResourcePlacementForTest("test",
-		createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 5))
-	noMaxSurgeCRP.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{
-		Type: intstr.Int,
-		IntVal: 0,
-	}
-	crpWithApplyStrategy := clusterResourcePlacementForTest("test",
-		createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0))
-	crpWithApplyStrategy.Spec.Strategy.ApplyStrategy = &fleetv1beta1.ApplyStrategy{
-		Type: fleetv1beta1.ApplyStrategyTypeServerSideApply,
-	}
-	// crpWithUnavailablePeriod has a 60s unavailable period
-	crpWithUnavailablePeriod := clusterResourcePlacementForTest("test",
-		createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 3))
-	crpWithUnavailablePeriod.Spec.Strategy.RollingUpdate.UnavailablePeriodSeconds = ptr.To(60)
-
-	canBeReadyBinding := generateClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1)
-	canBeReadyBinding.Generation = 15
-	canBeReadyBinding.Status.Conditions = []metav1.Condition{
-		{
-			Type: string(fleetv1beta1.ResourceBindingApplied),
-			Status: metav1.ConditionTrue,
-			ObservedGeneration: 15,
-			LastTransitionTime: metav1.Time{
-				Time: now.Add(-time.Hour),
-			},
-		},
-	}
-
-	// create bindings below to test different wait times
-	notReadyUnscheduledBinding := generateClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster2)
-	notReadyUnscheduledBinding.Generation = 15
-	notReadyUnscheduledBinding.Status.Conditions = []metav1.Condition{
-		{
-			Type: string(fleetv1beta1.ResourceBindingApplied),
-			Status: metav1.ConditionTrue,
-			ObservedGeneration: 15,
-		},
-		{
-			Type: string(fleetv1beta1.ResourceBindingAvailable),
-			Status: metav1.ConditionTrue,
-			ObservedGeneration: 15,
-			LastTransitionTime: metav1.Time{
-				Time: now.Add(-1 * time.Minute),
-			},
-			Reason: work.WorkNotTrackableReason, // Make it not ready
-		},
-	}
-
-	notReadyUnscheduledBinding2 := generateClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster3)
-	notReadyUnscheduledBinding2.Generation = 15
-	notReadyUnscheduledBinding2.Status.Conditions = []metav1.Condition{
-		{
-			Type: string(fleetv1beta1.ResourceBindingApplied),
-			Status: metav1.ConditionTrue,
-			ObservedGeneration: 15,
-		},
-		{
-			Type: string(fleetv1beta1.ResourceBindingAvailable),
-			Status: metav1.ConditionTrue,
-			ObservedGeneration: 15,
-			LastTransitionTime: metav1.Time{
-				Time: now.Add(-35 * time.Second),
-			},
-			Reason: work.WorkNotTrackableReason, // Make it not ready
-		},
-	}
-
-	readyBoundBinding := generateClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-2", cluster2)
-	readyBoundBinding.Generation = 15
-	readyBoundBinding.Status.Conditions = []metav1.Condition{
-		{
-			Type: string(fleetv1beta1.ResourceBindingApplied),
-			Status: metav1.ConditionTrue,
-			ObservedGeneration: 15,
-		},
-		{
-			Type: string(fleetv1beta1.ResourceBindingAvailable),
-			Status: metav1.ConditionTrue,
-			ObservedGeneration: 15,
-			Reason: work.WorkAvailableReason, // Make it ready
-		},
-	}
-
-	notReadyBoundBinding := generateClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster3)
-	notReadyBoundBinding.Generation = 15
-	notReadyBoundBinding.Status.Conditions = []metav1.Condition{
-		{
-			Type: string(fleetv1beta1.ResourceBindingApplied),
-			Status: metav1.ConditionTrue,
-			ObservedGeneration: 15,
-		},
-		{
-			Type: string(fleetv1beta1.ResourceBindingAvailable),
-			Status: metav1.ConditionTrue,
-			ObservedGeneration: 15,
-			LastTransitionTime: metav1.Time{
-				Time: now.Add(-35 * time.Second),
-			},
-			Reason: work.WorkNotTrackableReason, // Make it not ready
-		},
-	}
 	tests := map[string]struct {
 		allBindings []*fleetv1beta1.ClusterResourceBinding
 		latestResourceSnapshotName string
@@ -871,7 +764,8 @@ func TestPickBindingsToRoll(t *testing.T) {
 			},
 			latestResourceSnapshotName: "snapshot-2",
 			crp: clusterResourcePlacementForTest("test",
-				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0)),
+				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0),
+				createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)),
 			wantTobeUpdatedBindings: []int{0},
 			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
 				{
@@ -888,8 +782,13 @@ func TestPickBindingsToRoll(t *testing.T) {
 				generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster1),
 			},
 			latestResourceSnapshotName: "snapshot-2",
-			crp: crpWithApplyStrategy,
-			wantTobeUpdatedBindings: []int{0},
+			crp: clusterResourcePlacementForTest("test",
+				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0),
+				createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(),
+					&fleetv1beta1.ApplyStrategy{
+						Type: fleetv1beta1.ApplyStrategyTypeServerSideApply,
+					})),
+			wantTobeUpdatedBindings: []int{0},
 			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
 				{
 					State: fleetv1beta1.BindingStateBound,
@@ -909,7 +808,8 @@ func TestPickBindingsToRoll(t *testing.T) {
 			},
 			latestResourceSnapshotName: "snapshot-2",
 			crp: clusterResourcePlacementForTest("test",
-				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0)),
+				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0),
+				createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)),
 			matchedROs: []*fleetv1alpha1.ResourceOverrideSnapshot{},
 			matchedCROs: []*fleetv1alpha1.ClusterResourceOverrideSnapshot{},
 			wantTobeUpdatedBindings: []int{0},
@@ -929,7 +829,8 @@ func TestPickBindingsToRoll(t *testing.T) {
 			},
 			latestResourceSnapshotName: "snapshot-2",
 			crp: clusterResourcePlacementForTest("test",
-				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0)),
+				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0),
+				createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)),
 			matchedCROs: []*fleetv1alpha1.ClusterResourceOverrideSnapshot{
 				{
 					ObjectMeta: metav1.ObjectMeta{
@@ -1022,7 +923,8 @@ func TestPickBindingsToRoll(t *testing.T) {
 			},
 			latestResourceSnapshotName: "snapshot-2",
 			crp: clusterResourcePlacementForTest("test",
-				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0)),
+				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0),
+				createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)),
 			matchedCROs: []*fleetv1alpha1.ClusterResourceOverrideSnapshot{
 				{
 					ObjectMeta: metav1.ObjectMeta{
@@ -1104,7 +1006,8 @@ func TestPickBindingsToRoll(t *testing.T) {
 			},
 			latestResourceSnapshotName: "snapshot-1",
 			crp: clusterResourcePlacementForTest("test",
-				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0)),
+				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0),
+				createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)),
 			matchedCROs: []*fleetv1alpha1.ClusterResourceOverrideSnapshot{
 				{
 					ObjectMeta: metav1.ObjectMeta{
@@ -1197,7 +1100,8 @@ func TestPickBindingsToRoll(t *testing.T) {
 			},
 			latestResourceSnapshotName: "snapshot-1",
 			crp: clusterResourcePlacementForTest("test",
-				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0)),
+				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0),
+				createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)),
 			wantTobeUpdatedBindings: []int{},
 			wantStaleUnselectedBindings: []int{},
 			wantNeedRoll: false,
@@ -1209,7 +1113,8 @@ func TestPickBindingsToRoll(t *testing.T) {
 			},
 			latestResourceSnapshotName: "snapshot-2",
 			crp: clusterResourcePlacementForTest("test",
-				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 5)),
+				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 5),
+				createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)),
 			wantTobeUpdatedBindings: []int{0},
 			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
 				{
@@ -1231,7 +1136,8 @@ func TestPickBindingsToRoll(t *testing.T) {
 			},
 			latestResourceSnapshotName: "snapshot-2",
 			crp: clusterResourcePlacementForTest("test",
-				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 5)),
+				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 5),
+				createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)),
 			wantTobeUpdatedBindings: []int{0},
 			wantStaleUnselectedBindings: []int{1, 2, 3, 4},
 			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
 				{
@@ -1272,8 +1178,10 @@ func TestPickBindingsToRoll(t *testing.T) {
 				generateClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster4),
 				generateClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster5),
 			},
-			latestResourceSnapshotName: "snapshot-2",
-			crp: noMaxUnavailableCRP,
+			latestResourceSnapshotName: "snapshot-2",
+			crp: clusterResourcePlacementForTest("test",
+				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 5),
+				createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)),
 			wantTobeUpdatedBindings: []int{0, 1, 2},
 			wantStaleUnselectedBindings: []int{3, 4},
 			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
 				{
@@ -1314,8 +1222,20 @@ func TestPickBindingsToRoll(t *testing.T) {
 				generateClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster4),
 				generateClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster5),
 			},
-			latestResourceSnapshotName: "snapshot-2",
-			crp: noMaxUnavailableCRP,
+			latestResourceSnapshotName: "snapshot-2",
+			crp: clusterResourcePlacementForTest("test",
+				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 5),
+				createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{
+					MaxUnavailable: &intstr.IntOrString{
+						Type: intstr.Int,
+						IntVal: 0,
+					},
+					MaxSurge: &intstr.IntOrString{
+						Type: intstr.Int,
+						IntVal: 3,
+					},
+					UnavailablePeriodSeconds: ptr.To(1),
+				}, nil)),
 			wantTobeUpdatedBindings: []int{},
 			wantStaleUnselectedBindings: []int{0, 1, 2, 3, 4},
 			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
 				{
@@ -1352,7 +1272,8 @@ func TestPickBindingsToRoll(t *testing.T) {
 			allBindings: []*fleetv1beta1.ClusterResourceBinding{},
 			latestResourceSnapshotName: "snapshot-2",
 			crp: clusterResourcePlacementForTest("test",
-				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 5)),
+				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 5),
+				createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)),
 			wantTobeUpdatedBindings: []int{},
 			wantNeedRoll: false,
 			wantWaitTime: 0,
@@ -1364,7 +1285,8 @@ func TestPickBindingsToRoll(t *testing.T) {
 			},
 			latestResourceSnapshotName: "snapshot-2",
 			crp: clusterResourcePlacementForTest("test",
-				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2)),
+				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2),
+				createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)),
 			wantTobeUpdatedBindings: []int{0, 1},
 			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
 				{
@@ -1383,11 +1305,23 @@ func TestPickBindingsToRoll(t *testing.T) {
 		},
 		"test with scheduled bindings (always update the scheduled binding first)": {
 			allBindings: []*fleetv1beta1.ClusterResourceBinding{
-				canBeReadyBinding,
+				generateCanBeReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1),
 				generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster2),
 			},
-			latestResourceSnapshotName: "snapshot-2",
-			crp: crpWithUnavailablePeriod,
+			latestResourceSnapshotName: "snapshot-2",
+			crp: clusterResourcePlacementForTest("test",
+				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 3),
+				createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{
+					MaxUnavailable: &intstr.IntOrString{
+						Type: intstr.String,
+						StrVal: "20%",
+					},
+					MaxSurge: &intstr.IntOrString{
+						Type: intstr.Int,
+						IntVal: 3,
+					},
+					UnavailablePeriodSeconds: ptr.To(60),
+				}, nil)),
 			wantTobeUpdatedBindings: []int{1},
 			wantStaleUnselectedBindings: []int{0},
 			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
 				{
@@ -1414,7 +1348,8 @@ func TestPickBindingsToRoll(t *testing.T) {
 			},
 			latestResourceSnapshotName: "snapshot-1",
 			crp: clusterResourcePlacementForTest("test",
-				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 4)),
+				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 4),
+				createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)),
 			wantTobeUpdatedBindings: []int{0, 2},
 			// empty list as unscheduled bindings will be removed and are not tracked in the CRP today.
wantStaleUnselectedBindings: []int{}, @@ -1471,18 +1406,31 @@ func TestPickBindingsToRoll(t *testing.T) { }, }, crp: clusterResourcePlacementForTest("test", - createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0)), + createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)), wantErr: controller.ErrExpectedBehavior, }, "test bound bindings with different waitTimes": { // want the min wait time of bound bindings that are not ready allBindings: []*fleetv1beta1.ClusterResourceBinding{ - notReadyBoundBinding, // notReady, waitTime = t - 35s - canBeReadyBinding, // notReady, no wait time because it does not have available condition yet - readyBoundBinding, // Ready + generateNotReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster3, metav1.Time{Time: now.Add(-35 * time.Second)}), // notReady, waitTime = t - 35s + generateCanBeReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1), // notReady, no wait time because it does not have available condition yet, + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-2", cluster2), // Ready }, - latestResourceSnapshotName: "snapshot-2", - crp: crpWithUnavailablePeriod, // UnavailablePeriodSeconds is 60s -> readyTimeCutOff = t - 60s + latestResourceSnapshotName: "snapshot-2", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 3), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.String, + StrVal: "20%", + }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 3, + }, + UnavailablePeriodSeconds: ptr.To(60), + }, nil)), // UnavailablePeriodSeconds is 60s -> readyTimeCutOff = t - 60s wantStaleUnselectedBindings: []int{0, 1}, wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{ { @@ -1503,141 +1451,787 @@ func TestPickBindingsToRoll(t *testing.T) { "test unscheduled bindings with different waitTimes": { // want the min wait time of unscheduled bindings that are not ready allBindings: []*fleetv1beta1.ClusterResourceBinding{ - notReadyUnscheduledBinding, // NotReady, waitTime = t - 60s - notReadyUnscheduledBinding2, // NotReady, waitTime = t - 35s + generateNotReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster2, metav1.Time{Time: now.Add(-1 * time.Minute)}), // NotReady, waitTime = t - 60s + generateNotReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster3, metav1.Time{Time: now.Add(-35 * time.Second)}), // NotReady, waitTime = t - 35s }, - latestResourceSnapshotName: "snapshot-2", - crp: crpWithUnavailablePeriod, // UnavailablePeriodSeconds is 60s -> readyTimeCutOff = t - 60s + latestResourceSnapshotName: "snapshot-2", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 3), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.String, + StrVal: "20%", + }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 3, + }, + UnavailablePeriodSeconds: ptr.To(60), + }, nil)), // UnavailablePeriodSeconds is 60s -> readyTimeCutOff = t - 60s wantStaleUnselectedBindings: []int{}, // 
empty list as unscheduled bindings will be removed and are not tracked in the CRP today. wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{}, // unscheduled binding does not have desired spec so that putting the empty here wantNeedRoll: true, wantWaitTime: 25 * time.Second, // minWaitTime = (t - 35 seconds) - (t - 60 seconds) = 25 seconds }, - } - for name, tt := range tests { - t.Run(name, func(t *testing.T) { - scheme := serviceScheme(t) - var objects []client.Object - for i := range tt.clusters { - objects = append(objects, &tt.clusters[i]) - } - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(objects...). - WithStatusSubresource(objects...). - Build() - r := Reconciler{ - Client: fakeClient, - } - resourceSnapshot := &fleetv1beta1.ClusterResourceSnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Name: tt.latestResourceSnapshotName, - }, - } - gotUpdatedBindings, gotStaleUnselectedBindings, gotNeedRoll, gotWaitTime, err := r.pickBindingsToRoll(context.Background(), tt.allBindings, resourceSnapshot, tt.crp, tt.matchedCROs, tt.matchedROs) - if (err != nil) != (tt.wantErr != nil) || err != nil && !errors.Is(err, tt.wantErr) { - t.Fatalf("pickBindingsToRoll() error = %v, wantErr %v", err, tt.wantErr) - } - if err != nil { - return - } - - wantTobeUpdatedBindings := make([]toBeUpdatedBinding, len(tt.wantTobeUpdatedBindings)) - for i, index := range tt.wantTobeUpdatedBindings { - wantTobeUpdatedBindings[i].currentBinding = tt.allBindings[index] - wantTobeUpdatedBindings[i].desiredBinding = tt.allBindings[index].DeepCopy() - wantTobeUpdatedBindings[i].desiredBinding.Spec = tt.wantDesiredBindingsSpec[index] - } - wantStaleUnselectedBindings := make([]toBeUpdatedBinding, len(tt.wantStaleUnselectedBindings)) - for i, index := range tt.wantStaleUnselectedBindings { - wantStaleUnselectedBindings[i].currentBinding = tt.allBindings[index] - wantStaleUnselectedBindings[i].desiredBinding = tt.allBindings[index].DeepCopy() - wantStaleUnselectedBindings[i].desiredBinding.Spec = tt.wantDesiredBindingsSpec[index] - } - - if diff := cmp.Diff(wantTobeUpdatedBindings, gotUpdatedBindings, cmpOptions...); diff != "" { - t.Errorf("pickBindingsToRoll() toBeUpdatedBindings mismatch (-want, +got):\n%s", diff) - } - if diff := cmp.Diff(wantStaleUnselectedBindings, gotStaleUnselectedBindings, cmpOptions...); diff != "" { - t.Errorf("pickBindingsToRoll() staleUnselectedBindings mismatch (-want, +got):\n%s", diff) - } - if gotNeedRoll != tt.wantNeedRoll { - t.Errorf("pickBindingsToRoll() = needRoll %v, want %v", gotNeedRoll, tt.wantNeedRoll) - } - - if gotWaitTime.Round(time.Second) != tt.wantWaitTime { - t.Errorf("pickBindingsToRoll() = waitTime %v, want %v", gotWaitTime, tt.wantWaitTime) - } - }) - } -} - -func createPlacementPolicyForTest(placementType fleetv1beta1.PlacementType, numberOfClusters int32) *fleetv1beta1.PlacementPolicy { - return &fleetv1beta1.PlacementPolicy{ - PlacementType: placementType, - NumberOfClusters: ptr.To(numberOfClusters), - Affinity: &fleetv1beta1.Affinity{ - ClusterAffinity: &fleetv1beta1.ClusterAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &fleetv1beta1.ClusterSelector{ - ClusterSelectorTerms: []fleetv1beta1.ClusterSelectorTerm{ - { - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "key1": "value1", - }, - }, - }, + "test bound deleting binding - rollout blocked": { + allBindings: []*fleetv1beta1.ClusterResourceBinding{ + setDeletionTimeStampForBinding(generateClusterResourceBinding(fleetv1beta1.BindingStateBound, 
"snapshot-1", cluster1)), + }, + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, generateDefaultRollingUpdateConfig(), nil)), + wantTobeUpdatedBindings: []int{}, + wantStaleUnselectedBindings: []int{}, + wantNeedRoll: false, + wantWaitTime: time.Second, + }, + "test policy change with MaxSurge specified, evict resources on unscheduled cluster - rollout allowed for one scheduled binding": { + allBindings: []*fleetv1beta1.ClusterResourceBinding{ + setDeletionTimeStampForBinding(generateCanBeReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster1)), + generateCanBeReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster2), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster3), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster4), + }, + latestResourceSnapshotName: "snapshot-1", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 1, + }, + UnavailablePeriodSeconds: ptr.To(1), + }, nil)), + wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{ + {}, + {}, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster3, + ResourceSnapshotName: "snapshot-1", + }, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster4, + ResourceSnapshotName: "snapshot-1", }, }, + wantTobeUpdatedBindings: []int{2}, // specified MaxSurge helps us pick one scheduled binding to rollout. we don't have any ready binding so we don't remove any binding. 
+ wantStaleUnselectedBindings: []int{3}, + wantNeedRoll: true, + wantWaitTime: time.Second, }, - } -} - -func clusterResourcePlacementForTest(crpName string, policy *fleetv1beta1.PlacementPolicy) *fleetv1beta1.ClusterResourcePlacement { - return &fleetv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - }, - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ - { - Group: corev1.GroupName, - Version: "v1", - Kind: "Namespace", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"region": "east"}, + "test policy change with MaxUnavailable specified, evict resources on unscheduled cluster - rollout allowed for one unscheduled binding": { + allBindings: []*fleetv1beta1.ClusterResourceBinding{ + setDeletionTimeStampForBinding(generateReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster1)), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster2), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster3), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster4), + }, + latestResourceSnapshotName: "snapshot-1", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 2, }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + UnavailablePeriodSeconds: ptr.To(1), + }, nil)), + wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{ + {}, + {}, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster3, + ResourceSnapshotName: "snapshot-1", + }, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster4, + ResourceSnapshotName: "snapshot-1", }, }, - Policy: policy, - Strategy: fleetv1beta1.RolloutStrategy{ - Type: fleetv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &fleetv1beta1.RollingUpdateConfig{ + wantTobeUpdatedBindings: []int{1}, // specified MaxUnavailable helps us remove unscheduled binding, deleting unscheduled binding even though ready is a canBeUnavailable binding. 
+ wantStaleUnselectedBindings: []int{2, 3}, + wantNeedRoll: true, + wantWaitTime: 0, + }, + "test resource snapshot change with MaxUnavailable specified, evict resources on ready bound binding - rollout allowed for one ready bound binding": { + allBindings: []*fleetv1beta1.ClusterResourceBinding{ + setDeletionTimeStampForBinding(generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1)), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster2), + }, + latestResourceSnapshotName: "snapshot-2", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ MaxUnavailable: &intstr.IntOrString{ - Type: intstr.String, - StrVal: "20%", + Type: intstr.Int, + IntVal: 2, }, MaxSurge: &intstr.IntOrString{ Type: intstr.Int, - IntVal: 3, + IntVal: 0, }, UnavailablePeriodSeconds: ptr.To(1), + }, nil)), + wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{ + {}, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster2, + ResourceSnapshotName: "snapshot-2", }, }, + wantTobeUpdatedBindings: []int{1}, // specified MaxUnavailable helps us update bound binding, deleting bound binding even though ready is a canBeUnavailable binding. + wantStaleUnselectedBindings: []int{}, + wantNeedRoll: true, + wantWaitTime: 0, }, - } -} - -func generateFailedToApplyClusterResourceBinding(state fleetv1beta1.BindingState, resourceSnapshotName, targetCluster string) *fleetv1beta1.ClusterResourceBinding { - binding := generateClusterResourceBinding(state, resourceSnapshotName, targetCluster) - binding.Status.Conditions = append(binding.Status.Conditions, metav1.Condition{ + "test resource snapshot change with MaxUnavailable specified, evict resource on canBeReady binding - rollout blocked": { + allBindings: []*fleetv1beta1.ClusterResourceBinding{ + setDeletionTimeStampForBinding(generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1)), + generateCanBeReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster2), + }, + latestResourceSnapshotName: "snapshot-2", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 2, + }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + UnavailablePeriodSeconds: ptr.To(1), + }, nil)), + wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{ + {}, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster2, + ResourceSnapshotName: "snapshot-2", + }, + }, + wantTobeUpdatedBindings: []int{}, + wantStaleUnselectedBindings: []int{1}, // even with specified MaxUnavailable, we have no ready bindings to allow update. 
Deleting bound binding even though ready is a canBeUnavailable binding + wantNeedRoll: true, + wantWaitTime: time.Second, + }, + "test resource snapshot change with MaxUnavailable specified, evict resources on failed to apply bound binding - rollout allowed for one failed to apply bound binding": { + allBindings: []*fleetv1beta1.ClusterResourceBinding{ + setDeletionTimeStampForBinding(generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1)), + generateFailedToApplyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster2), + }, + latestResourceSnapshotName: "snapshot-2", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + UnavailablePeriodSeconds: ptr.To(1), + }, nil)), + wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{ + {}, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster2, + ResourceSnapshotName: "snapshot-2", + }, + }, + wantTobeUpdatedBindings: []int{1}, // failedToApply bound binding always gets rolled out, regardless of MaxUnavailable. + wantStaleUnselectedBindings: []int{}, + wantNeedRoll: true, + wantWaitTime: time.Second, + }, + "test upscale, evict resources from ready bound binding - rollout allowed for two new scheduled bindings": { + allBindings: []*fleetv1beta1.ClusterResourceBinding{ + setDeletionTimeStampForBinding(generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1)), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster2), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster3), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster4), + }, + latestResourceSnapshotName: "snapshot-1", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 4), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + UnavailablePeriodSeconds: ptr.To(1), + }, nil)), + wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{ + {}, + {}, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster3, + ResourceSnapshotName: "snapshot-1", + }, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster4, + ResourceSnapshotName: "snapshot-1", + }, + }, + wantTobeUpdatedBindings: []int{2, 3}, // both new scheduled bindings are rolled out, target number by itself is greater than canBeReady bindings. 
+ wantStaleUnselectedBindings: []int{}, + wantNeedRoll: true, + wantWaitTime: 0, + }, + "test upscale with policy change MaxSurge specified, evict resources from canBeReady bound binding - rollout allowed for three new scheduled bindings": { + allBindings: []*fleetv1beta1.ClusterResourceBinding{ + setDeletionTimeStampForBinding(generateCanBeReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1)), + generateCanBeReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster2), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster3), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster4), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster5), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster6), + }, + latestResourceSnapshotName: "snapshot-1", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 4), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 1, + }, + UnavailablePeriodSeconds: ptr.To(1), + }, nil)), + wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{ + {}, + {}, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster3, + ResourceSnapshotName: "snapshot-1", + }, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster4, + ResourceSnapshotName: "snapshot-1", + }, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster5, + ResourceSnapshotName: "snapshot-1", + }, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster6, + ResourceSnapshotName: "snapshot-1", + }, + }, + wantTobeUpdatedBindings: []int{2, 3, 4}, // specified MaxSurge helps us pick three new scheduled bindings, target number + MaxSurge is greater than canBeReady bindings. 
+ wantStaleUnselectedBindings: []int{5}, + wantNeedRoll: true, + wantWaitTime: time.Second, + }, + "test upscale with resource change MaxUnavailable specified, evict resources from ready bound binding - rollout allowed for old bound and new scheduled bindings": { + allBindings: []*fleetv1beta1.ClusterResourceBinding{ + setDeletionTimeStampForBinding(generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1)), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster2), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-2", cluster3), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-2", cluster4), + }, + latestResourceSnapshotName: "snapshot-2", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 4), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 4, + }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + UnavailablePeriodSeconds: ptr.To(1), + }, nil)), + wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{ + {}, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster2, + ResourceSnapshotName: "snapshot-2", + }, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster3, + ResourceSnapshotName: "snapshot-2", + }, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster4, + ResourceSnapshotName: "snapshot-2", + }, + }, + wantTobeUpdatedBindings: []int{1, 2, 3}, // both new scheduled bindings are picked because target number is greater than canBeReady bindings, specified MaxUnavailable helps pick bound binding to updates, deleting ready bound binding is canBeUnavailable binding. + wantStaleUnselectedBindings: []int{}, + wantNeedRoll: true, + wantWaitTime: 0, + }, + "test downscale, evict resources from ready unscheduled binding - rollout allowed for one unscheduled binding": { + allBindings: []*fleetv1beta1.ClusterResourceBinding{ + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster2), + setDeletionTimeStampForBinding(generateReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster3)), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster4), + }, + latestResourceSnapshotName: "snapshot-1", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + UnavailablePeriodSeconds: ptr.To(1), + }, nil)), + wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{ + {}, + {}, + {}, + {}, + }, + wantTobeUpdatedBindings: []int{3}, // more ready bindings than required, we remove the unscheduled binding. 
+ wantStaleUnselectedBindings: []int{}, + wantNeedRoll: true, + wantWaitTime: 0, + }, + "test downscale, evict resources from ready bound binding - rollout allowed for two unscheduled bindings to be deleted": { + allBindings: []*fleetv1beta1.ClusterResourceBinding{ + setDeletionTimeStampForBinding(generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1)), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster2), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster3), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster4), + }, + latestResourceSnapshotName: "snapshot-1", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 1, + }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + UnavailablePeriodSeconds: ptr.To(1), + }, nil)), + wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{ + {}, + {}, + {}, + {}, + }, + wantTobeUpdatedBindings: []int{2, 3}, // more ready bindings than required we remove the unscheduled binding, specified MaxUnavailable helps us remove one more unscheduled binding. + wantStaleUnselectedBindings: []int{}, + wantNeedRoll: true, + wantWaitTime: 0, + }, + "test downscale with policy change, evict unscheduled ready binding - rollout allowed for one unscheduled binding": { + allBindings: []*fleetv1beta1.ClusterResourceBinding{ + setDeletionTimeStampForBinding(generateReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster1)), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster2), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster3), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster4), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster5), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster6), + }, + latestResourceSnapshotName: "snapshot-1", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + UnavailablePeriodSeconds: ptr.To(1), + }, nil)), + wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{ + {}, + {}, + {}, + {}, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster5, + ResourceSnapshotName: "snapshot-1", + }, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster6, + ResourceSnapshotName: "snapshot-1", + }, + }, + wantTobeUpdatedBindings: []int{1}, // more ready bindings than required we remove one unscheduled binding, since two bindings are already canBeReady we don't roll out new scheduled bindings. 
+ wantStaleUnselectedBindings: []int{4, 5}, + wantNeedRoll: true, + wantWaitTime: 0, + }, + "test downscale with policy change, evict unscheduled failed to apply binding - rollout allowed for new scheduled bindings": { + allBindings: []*fleetv1beta1.ClusterResourceBinding{ + setDeletionTimeStampForBinding(generateFailedToApplyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster1)), + generateFailedToApplyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster2), + generateFailedToApplyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster3), + generateFailedToApplyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster4), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster5), + generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster6), + }, + latestResourceSnapshotName: "snapshot-1", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + UnavailablePeriodSeconds: ptr.To(1), + }, nil)), + wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{ + {}, + {}, + {}, + {}, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster5, + ResourceSnapshotName: "snapshot-1", + }, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster6, + ResourceSnapshotName: "snapshot-1", + }, + }, + wantTobeUpdatedBindings: []int{4, 5}, // no ready unscheduled bindings, so scheduled bindings were chosen. 
+ wantStaleUnselectedBindings: []int{}, + wantNeedRoll: true, + wantWaitTime: time.Second, + }, + "test downscale with resource snapshot change, evict ready bound binding - rollout allowed for one unscheduled binding": { + allBindings: []*fleetv1beta1.ClusterResourceBinding{ + setDeletionTimeStampForBinding(generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1)), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster2), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster3), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster4), + }, + latestResourceSnapshotName: "snapshot-2", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + UnavailablePeriodSeconds: ptr.To(1), + }, nil)), + wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{ + {}, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster2, + ResourceSnapshotName: "snapshot-2", + }, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster3, + ResourceSnapshotName: "snapshot-2", + }, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster4, + ResourceSnapshotName: "snapshot-2", + }, + }, + wantTobeUpdatedBindings: []int{2}, // remove candidates (unscheduled bindings) are chosen before update candidates (bound bindings) + wantStaleUnselectedBindings: []int{1}, + wantNeedRoll: true, + wantWaitTime: 0, + }, + "test downscale with resource snapshot change, evict ready unscheduled binding - rollout allowed for one unscheduled binding, one bound binding": { + allBindings: []*fleetv1beta1.ClusterResourceBinding{ + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster2), + setDeletionTimeStampForBinding(generateReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster3)), + generateReadyClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster4), + }, + latestResourceSnapshotName: "snapshot-2", + crp: clusterResourcePlacementForTest("test", + createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2), + createPlacementRolloutStrategyForTest(fleetv1beta1.RollingUpdateRolloutStrategyType, &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 1, + }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + UnavailablePeriodSeconds: ptr.To(1), + }, nil)), + wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{ + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster1, + ResourceSnapshotName: "snapshot-2", + }, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster2, + ResourceSnapshotName: "snapshot-2", + }, + {}, + { + State: fleetv1beta1.BindingStateBound, + TargetCluster: cluster4, + ResourceSnapshotName: "snapshot-2", + }, + }, + wantTobeUpdatedBindings: []int{3, 0}, // remove candidates (unscheduled bindings) are chosen before update candidates (bound bindings), MaxUnavailable helps us pick bound binding to update. 
+ wantStaleUnselectedBindings: []int{1}, + wantNeedRoll: true, + wantWaitTime: 0, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + scheme := serviceScheme(t) + var objects []client.Object + for i := range tt.clusters { + objects = append(objects, &tt.clusters[i]) + } + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). + WithStatusSubresource(objects...). + Build() + r := Reconciler{ + Client: fakeClient, + } + resourceSnapshot := &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: tt.latestResourceSnapshotName, + }, + } + gotUpdatedBindings, gotStaleUnselectedBindings, gotNeedRoll, gotWaitTime, err := r.pickBindingsToRoll(context.Background(), tt.allBindings, resourceSnapshot, tt.crp, tt.matchedCROs, tt.matchedROs) + if (err != nil) != (tt.wantErr != nil) || err != nil && !errors.Is(err, tt.wantErr) { + t.Fatalf("pickBindingsToRoll() error = %v, wantErr %v", err, tt.wantErr) + } + if err != nil { + return + } + + wantTobeUpdatedBindings := make([]toBeUpdatedBinding, len(tt.wantTobeUpdatedBindings)) + for i, index := range tt.wantTobeUpdatedBindings { + // Unscheduled bindings are only removed in a single rollout cycle. + if tt.allBindings[index].Spec.State != fleetv1beta1.BindingStateUnscheduled { + wantTobeUpdatedBindings[i].currentBinding = tt.allBindings[index] + wantTobeUpdatedBindings[i].desiredBinding = tt.allBindings[index].DeepCopy() + wantTobeUpdatedBindings[i].desiredBinding.Spec = tt.wantDesiredBindingsSpec[index] + } else { + wantTobeUpdatedBindings[i].currentBinding = tt.allBindings[index] + } + } + wantStaleUnselectedBindings := make([]toBeUpdatedBinding, len(tt.wantStaleUnselectedBindings)) + for i, index := range tt.wantStaleUnselectedBindings { + // Unscheduled bindings are only removed in a single rollout cycle. 
+ if tt.allBindings[index].Spec.State != fleetv1beta1.BindingStateUnscheduled { + wantStaleUnselectedBindings[i].currentBinding = tt.allBindings[index] + wantStaleUnselectedBindings[i].desiredBinding = tt.allBindings[index].DeepCopy() + wantStaleUnselectedBindings[i].desiredBinding.Spec = tt.wantDesiredBindingsSpec[index] + } else { + wantStaleUnselectedBindings[i].currentBinding = tt.allBindings[index] + } + } + + if diff := cmp.Diff(wantTobeUpdatedBindings, gotUpdatedBindings, cmpOptions...); diff != "" { + t.Errorf("pickBindingsToRoll() toBeUpdatedBindings mismatch (-want, +got):\n%s", diff) + } + if diff := cmp.Diff(wantStaleUnselectedBindings, gotStaleUnselectedBindings, cmpOptions...); diff != "" { + t.Errorf("pickBindingsToRoll() staleUnselectedBindings mismatch (-want, +got):\n%s", diff) + } + if gotNeedRoll != tt.wantNeedRoll { + t.Errorf("pickBindingsToRoll() = needRoll %v, want %v", gotNeedRoll, tt.wantNeedRoll) + } + + if gotWaitTime.Round(time.Second) != tt.wantWaitTime { + t.Errorf("pickBindingsToRoll() = waitTime %v, want %v", gotWaitTime, tt.wantWaitTime) + } + }) + } +} + +func createPlacementPolicyForTest(placementType fleetv1beta1.PlacementType, numberOfClusters int32) *fleetv1beta1.PlacementPolicy { + return &fleetv1beta1.PlacementPolicy{ + PlacementType: placementType, + NumberOfClusters: ptr.To(numberOfClusters), + Affinity: &fleetv1beta1.Affinity{ + ClusterAffinity: &fleetv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &fleetv1beta1.ClusterSelector{ + ClusterSelectorTerms: []fleetv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "key1": "value1", + }, + }, + }, + }, + }, + }, + }, + } +} + +func createPlacementRolloutStrategyForTest(rolloutType fleetv1beta1.RolloutStrategyType, rollingUpdate *fleetv1beta1.RollingUpdateConfig, applyStrategy *fleetv1beta1.ApplyStrategy) fleetv1beta1.RolloutStrategy { + return fleetv1beta1.RolloutStrategy{ + Type: rolloutType, + RollingUpdate: rollingUpdate, + ApplyStrategy: applyStrategy, + } +} + +func clusterResourcePlacementForTest(crpName string, policy *fleetv1beta1.PlacementPolicy, strategy fleetv1beta1.RolloutStrategy) *fleetv1beta1.ClusterResourcePlacement { + return &fleetv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + }, + Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: corev1.GroupName, + Version: "v1", + Kind: "Namespace", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"region": "east"}, + }, + }, + }, + Policy: policy, + Strategy: strategy, + }, + } +} + +func generateFailedToApplyClusterResourceBinding(state fleetv1beta1.BindingState, resourceSnapshotName, targetCluster string) *fleetv1beta1.ClusterResourceBinding { + binding := generateClusterResourceBinding(state, resourceSnapshotName, targetCluster) + binding.Status.Conditions = append(binding.Status.Conditions, metav1.Condition{ Type: string(fleetv1beta1.ResourceBindingApplied), Status: metav1.ConditionFalse, }) return binding } +func generateCanBeReadyClusterResourceBinding(state fleetv1beta1.BindingState, resourceSnapshotName, targetCluster string) *fleetv1beta1.ClusterResourceBinding { + binding := generateClusterResourceBinding(state, resourceSnapshotName, targetCluster) + binding.Status.Conditions = []metav1.Condition{ + { + Type: string(fleetv1beta1.ResourceBindingApplied), + Status: metav1.ConditionTrue, + }, + } + return binding +} + +func 
generateReadyClusterResourceBinding(state fleetv1beta1.BindingState, resourceSnapshotName, targetCluster string) *fleetv1beta1.ClusterResourceBinding { + binding := generateClusterResourceBinding(state, resourceSnapshotName, targetCluster) + binding.Status.Conditions = []metav1.Condition{ + { + Type: string(fleetv1beta1.ResourceBindingApplied), + Status: metav1.ConditionTrue, + }, + { + Type: string(fleetv1beta1.ResourceBindingAvailable), + Status: metav1.ConditionTrue, + Reason: work.WorkAvailableReason, // Make it ready + }, + } + return binding +} + +func generateNotReadyClusterResourceBinding(state fleetv1beta1.BindingState, resourceSnapshotName, targetCluster string, lastTransitionTime metav1.Time) *fleetv1beta1.ClusterResourceBinding { + binding := generateClusterResourceBinding(state, resourceSnapshotName, targetCluster) + binding.Status.Conditions = []metav1.Condition{ + { + Type: string(fleetv1beta1.ResourceBindingApplied), + Status: metav1.ConditionTrue, + }, + { + Type: string(fleetv1beta1.ResourceBindingAvailable), + Status: metav1.ConditionTrue, + LastTransitionTime: lastTransitionTime, + Reason: work.WorkNotTrackableReason, // Make it not ready + }, + } + return binding +} + +func setDeletionTimeStampForBinding(binding *fleetv1beta1.ClusterResourceBinding) *fleetv1beta1.ClusterResourceBinding { + binding.DeletionTimestamp = &metav1.Time{ + Time: now, + } + return binding +} + +func generateDefaultRollingUpdateConfig() *fleetv1beta1.RollingUpdateConfig { + return &fleetv1beta1.RollingUpdateConfig{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.String, + StrVal: "20%", + }, + MaxSurge: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 3, + }, + UnavailablePeriodSeconds: ptr.To(1), + } +} + func TestUpdateStaleBindingsStatus(t *testing.T) { currentTime := time.Now() oldTransitionTime := metav1.NewTime(currentTime.Add(-1 * time.Hour)) diff --git a/pkg/utils/condition/condition.go b/pkg/utils/condition/condition.go index 7108b18ae..f3df6584f 100644 --- a/pkg/utils/condition/condition.go +++ b/pkg/utils/condition/condition.go @@ -145,6 +145,57 @@ const ( AfterStageTaskWaitTimeElapsedReason = "AfterStageTaskWaitTimeElapsed" ) +// A group of condition reason & message string which is used to populate the ClusterResourcePlacementEviction condition. +const ( + // ClusterResourcePlacementEvictionValidReason is the reason string of condition if the eviction is valid. + ClusterResourcePlacementEvictionValidReason = "ClusterResourcePlacementEvictionValid" + + // ClusterResourcePlacementEvictionInvalidReason is the reason string of condition if the eviction is invalid. + ClusterResourcePlacementEvictionInvalidReason = "ClusterResourcePlacementEvictionInvalid" + + // ClusterResourcePlacementEvictionExecutedReason is the reason string of condition if the eviction is executed. + ClusterResourcePlacementEvictionExecutedReason = "ClusterResourcePlacementEvictionExecuted" + + // ClusterResourcePlacementEvictionNotExecutedReason is the reason string of condition if the eviction is not executed. + ClusterResourcePlacementEvictionNotExecutedReason = "ClusterResourcePlacementEvictionNotExecuted" + + // EvictionInvalidMissingCRPMessage is the message string of invalid eviction condition when CRP is missing. + EvictionInvalidMissingCRPMessage = "Failed to find ClusterResourcePlacement targeted by eviction" + + // EvictionInvalidDeletingCRPMessage is the message string of invalid eviction condition when CRP is deleting. 
+ EvictionInvalidDeletingCRPMessage = "Found deleting ClusterResourcePlacement targeted by eviction" + + // EvictionInvalidMissingCRBMessage is the message string of invalid eviction condition when CRB is missing. + EvictionInvalidMissingCRBMessage = "Failed to find scheduler decision for placement in cluster targeted by eviction" + + // EvictionInvalidMultipleCRBMessage is the message string of invalid eviction condition when more than one CRB is present for cluster targeted by eviction. + EvictionInvalidMultipleCRBMessage = "Found more than one scheduler decision for placement in cluster targeted by eviction" + + // EvictionValidMessage is the message string of valid eviction condition. + EvictionValidMessage = "Eviction is valid" + + // EvictionAllowedNoPDBMessage is the message string for executed condition when no PDB is specified. + EvictionAllowedNoPDBMessage = "Eviction is allowed, no ClusterResourcePlacementDisruptionBudget specified" + + // EvictionAllowedPlacementRemovedMessage is the message string for executed condition when CRB targeted by eviction is being deleted. + EvictionAllowedPlacementRemovedMessage = "Eviction is allowed, resources propagated by placement is currently being removed from cluster targeted by eviction" + + // EvictionAllowedPlacementFailedMessage is the message string for executed condition when placed resources have failed to apply or the resources are not available. + EvictionAllowedPlacementFailedMessage = "Eviction is allowed, placement has failed" + + // EvictionBlockedMisconfiguredPDBSpecifiedMessage is the message string for not executed condition when the specified PDB is misconfigured for a PickAll CRP. + EvictionBlockedMisconfiguredPDBSpecifiedMessage = "Eviction is blocked by misconfigured ClusterResourcePlacementDisruptionBudget, either MaxUnavailable is specified or MinAvailable is specified as a percentage for PickAll ClusterResourcePlacement" + + // EvictionBlockedMissingPlacementMessage is the message string for not executed condition when resources are yet to be placed in the cluster targeted by eviction. + EvictionBlockedMissingPlacementMessage = "Eviction is blocked, placement has not propagated resources to target cluster yet" + + // EvictionAllowedPDBSpecifiedMessageFmt is the message format for executed condition when eviction is allowed by the specified PDB. + EvictionAllowedPDBSpecifiedMessageFmt = "Eviction is allowed by specified ClusterResourcePlacementDisruptionBudget, availablePlacements: %d, totalPlacements: %d" + + // EvictionBlockedPDBSpecifiedMessageFmt is the message format for not executed condition when eviction is blocked by the specified PDB. + EvictionBlockedPDBSpecifiedMessageFmt = "Eviction is blocked by specified ClusterResourcePlacementDisruptionBudget, availablePlacements: %d, totalPlacements: %d" +) + // EqualCondition compares one condition with another; it ignores the LastTransitionTime and Message fields, // and will consider the ObservedGeneration values from the two conditions a match if the current // condition is newer. 
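Side note on the two *MessageFmt constants above: they are fmt.Sprintf format strings taking the available and total placement counts. A minimal sketch of how a caller could render them follows; the helper name and the way the counts are obtained are illustrative assumptions, not part of this change:

// Illustrative sketch only: renders the PDB-related eviction messages defined above.
// The helper name is hypothetical; availablePlacements and totalPlacements would be
// derived by counting available placements against the
// ClusterResourcePlacementDisruptionBudget.
package example

import (
	"fmt"

	"go.goms.io/fleet/pkg/utils/condition"
)

func pdbEvictionMessage(allowed bool, availablePlacements, totalPlacements int) string {
	if allowed {
		return fmt.Sprintf(condition.EvictionAllowedPDBSpecifiedMessageFmt, availablePlacements, totalPlacements)
	}
	return fmt.Sprintf(condition.EvictionBlockedPDBSpecifiedMessageFmt, availablePlacements, totalPlacements)
}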
diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index dddf87dd4..d8e54cdd6 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -16,6 +16,7 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/pkg/controllers/clusterresourceplacement" "go.goms.io/fleet/pkg/controllers/work" @@ -890,6 +891,16 @@ func crpRemovedActual(crpName string) func() error { } } +func crpEvictionRemovedActual(crpEvictionName string) func() error { + return func() error { + if err := hubClient.Get(ctx, types.NamespacedName{Name: crpEvictionName}, &placementv1alpha1.ClusterResourcePlacementEviction{}); !errors.IsNotFound(err) { + return fmt.Errorf("CRP eviction still exists or an unexpected error occurred: %w", err) + } + + return nil + } +} + func validateCRPSnapshotRevisions(crpName string, wantPolicySnapshotRevision, wantResourceSnapshotRevision int) error { matchingLabels := client.MatchingLabels{placementv1beta1.CRPTrackingLabel: crpName} diff --git a/test/e2e/placement_eviction_test.go b/test/e2e/placement_eviction_test.go new file mode 100644 index 000000000..916a5e939 --- /dev/null +++ b/test/e2e/placement_eviction_test.go @@ -0,0 +1,169 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package e2e + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1" + placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/utils/condition" + "go.goms.io/fleet/test/e2e/framework" + testutilseviction "go.goms.io/fleet/test/utils/eviction" +) + +var _ = Describe("ClusterResourcePlacement eviction of bound binding, taint cluster before eviction - No PDB specified", Ordered, Serial, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpEvictionName := fmt.Sprintf(crpEvictionNameTemplate, GinkgoParallelProcess()) + taintClusterNames := []string{memberCluster1EastProdName} + + BeforeAll(func() { + By("creating work resources") + createWorkResources() + + // Create the CRP. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: workResourceSelector(), + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) + }) + + AfterAll(func() { + // Remove taint from member cluster 1. 
+ removeTaintsFromMemberClusters(taintClusterNames) + ensureCRPEvictionDeletion(crpEvictionName) + ensureCRPAndRelatedResourcesDeletion(crpName, allMemberClusters) + }) + + It("should update cluster resource placement status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement status as expected") + }) + + It("should place resources on the all available member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("add taint to member cluster 1", func() { + addTaintsToMemberClusters(taintClusterNames, buildTaints(taintClusterNames)) + }) + + It("create cluster resource placement eviction targeting member cluster 1", func() { + crpe := &placementv1alpha1.ClusterResourcePlacementEviction{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpEvictionName, + }, + Spec: placementv1alpha1.PlacementEvictionSpec{ + PlacementName: crpName, + ClusterName: taintClusterNames[0], + }, + } + Expect(hubClient.Create(ctx, crpe)).To(Succeed(), "Failed to create CRP eviction %s", crpe.Name) + }) + + It("should update cluster resource placement eviction status as expected", func() { + crpEvictionStatusUpdatedActual := testutilseviction.StatusUpdatedActual( + ctx, hubClient, crpEvictionName, + &testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage}, + &testutilseviction.IsExecutedEviction{IsExecuted: true, Msg: condition.EvictionAllowedNoPDBMessage}) + Eventually(crpEvictionStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement eviction status as expected") + }) + + It("should ensure no resources exist on evicted member cluster with taint", func() { + unSelectedClusters := []*framework.Cluster{memberCluster1EastProd} + for _, cluster := range unSelectedClusters { + resourceRemovedActual := workNamespaceRemovedFromClusterActual(cluster) + Eventually(resourceRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to check if resources doesn't exist on member cluster") + } + }) + + It("should update cluster resource placement status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), []string{memberCluster2EastCanaryName, memberCluster3WestProdName}, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement status as expected") + }) + + It("should place resources on the selected clusters with no taint", func() { + targetClusters := []*framework.Cluster{memberCluster2EastCanary, memberCluster3WestProd} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the selected clusters") + } + }) +}) + +var _ = Describe("ClusterResourcePlacement eviction of bound binding, no taint specified, evicted cluster is picked again by scheduler - No PDB specified", Ordered, Serial, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + crpEvictionName := fmt.Sprintf(crpEvictionNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + By("creating work resources") + createWorkResources() + + // Create the CRP. 
+ crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: workResourceSelector(), + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) + }) + + AfterAll(func() { + ensureCRPEvictionDeletion(crpEvictionName) + ensureCRPAndRelatedResourcesDeletion(crpName, allMemberClusters) + }) + + It("should update cluster resource placement status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement status as expected") + }) + + It("should place resources on the all available member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("create cluster resource placement eviction targeting member cluster 1", func() { + crpe := &placementv1alpha1.ClusterResourcePlacementEviction{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpEvictionName, + }, + Spec: placementv1alpha1.PlacementEvictionSpec{ + PlacementName: crpName, + ClusterName: memberCluster1EastProdName, + }, + } + Expect(hubClient.Create(ctx, crpe)).To(Succeed(), "Failed to create CRP eviction %s", crpe.Name) + }) + + It("should update cluster resource placement eviction status as expected", func() { + crpEvictionStatusUpdatedActual := testutilseviction.StatusUpdatedActual( + ctx, hubClient, crpEvictionName, + &testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage}, + &testutilseviction.IsExecutedEviction{IsExecuted: true, Msg: condition.EvictionAllowedNoPDBMessage}) + Eventually(crpEvictionStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement eviction status as expected") + }) + + It("should ensure evicted cluster is picked again by scheduler & update cluster resource placement status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement status as expected") + }) + + It("should place resources on the all available member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) +}) diff --git a/test/e2e/resources_test.go b/test/e2e/resources_test.go index 082962b7f..0f5bfd769 100644 --- a/test/e2e/resources_test.go +++ b/test/e2e/resources_test.go @@ -33,6 +33,7 @@ const ( internalServiceExportNameTemplate = "ise-%d" internalServiceImportNameTemplate = "isi-%d" endpointSliceExportNameTemplate = "ep-%d" + crpEvictionNameTemplate = "crpe-%d" customDeletionBlockerFinalizer = "custom-deletion-blocker-finalizer" workNamespaceLabelName = "process" diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index 99f33e997..98ac0c761 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -917,6 +917,17 @@ func ensureCRPAndRelatedResourcesDeletion(crpName string, memberClusters []*fram cleanupWorkResources() } +func ensureCRPEvictionDeletion(crpEvictionName string) { + crpe := &placementv1alpha1.ClusterResourcePlacementEviction{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
crpEvictionName, + }, + } + Expect(hubClient.Delete(ctx, crpe)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{}), "Failed to delete CRP eviction") + removedActual := crpEvictionRemovedActual(crpEvictionName) + Eventually(removedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "CRP eviction still exists") +} + // verifyWorkPropagationAndMarkAsAvailable verifies that works derived from a specific CPR have been created // for a specific cluster, and marks these works in the specific member cluster's // reserved namespace as applied and available. diff --git a/test/utils/eviction/eviction_status.go b/test/utils/eviction/eviction_status.go new file mode 100644 index 000000000..06cc106ab --- /dev/null +++ b/test/utils/eviction/eviction_status.go @@ -0,0 +1,100 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package eviction + +import ( + "context" + "fmt" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1" + "go.goms.io/fleet/pkg/utils/condition" +) + +var ( + lessFuncCondition = func(a, b metav1.Condition) bool { + return a.Type < b.Type + } + evictionStatusCmpOptions = cmp.Options{ + cmpopts.SortSlices(lessFuncCondition), + cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime"), + cmpopts.EquateEmpty(), + } +) + +func StatusUpdatedActual(ctx context.Context, client client.Client, evictionName string, isValidEviction *IsValidEviction, isExecutedEviction *IsExecutedEviction) func() error { + return func() error { + var eviction placementv1alpha1.ClusterResourcePlacementEviction + if err := client.Get(ctx, types.NamespacedName{Name: evictionName}, &eviction); err != nil { + return err + } + var conditions []metav1.Condition + if isValidEviction != nil { + if isValidEviction.IsValid { + validCondition := metav1.Condition{ + Type: string(placementv1alpha1.PlacementEvictionConditionTypeValid), + Status: metav1.ConditionTrue, + ObservedGeneration: eviction.GetGeneration(), + Reason: condition.ClusterResourcePlacementEvictionValidReason, + Message: isValidEviction.Msg, + } + conditions = append(conditions, validCondition) + } else { + invalidCondition := metav1.Condition{ + Type: string(placementv1alpha1.PlacementEvictionConditionTypeValid), + Status: metav1.ConditionFalse, + ObservedGeneration: eviction.GetGeneration(), + Reason: condition.ClusterResourcePlacementEvictionInvalidReason, + Message: isValidEviction.Msg, + } + conditions = append(conditions, invalidCondition) + } + } + if isExecutedEviction != nil { + if isExecutedEviction.IsExecuted { + executedCondition := metav1.Condition{ + Type: string(placementv1alpha1.PlacementEvictionConditionTypeExecuted), + Status: metav1.ConditionTrue, + ObservedGeneration: eviction.GetGeneration(), + Reason: condition.ClusterResourcePlacementEvictionExecutedReason, + Message: isExecutedEviction.Msg, + } + conditions = append(conditions, executedCondition) + } else { + notExecutedCondition := metav1.Condition{ + Type: string(placementv1alpha1.PlacementEvictionConditionTypeExecuted), + Status: metav1.ConditionFalse, + ObservedGeneration: eviction.GetGeneration(), + Reason: condition.ClusterResourcePlacementEvictionNotExecutedReason, + Message: isExecutedEviction.Msg, + } + conditions = append(conditions, notExecutedCondition) + } + } + wantStatus := 
placementv1alpha1.PlacementEvictionStatus{ + Conditions: conditions, + } + if diff := cmp.Diff(eviction.Status, wantStatus, evictionStatusCmpOptions...); diff != "" { + return fmt.Errorf("CRP eviction status diff (-got, +want): %s", diff) + } + return nil + } +} + +type IsValidEviction struct { + IsValid bool + Msg string +} + +type IsExecutedEviction struct { + IsExecuted bool + Msg string +}
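A hedged usage sketch for the StatusUpdatedActual helper above: asserting that an eviction is valid but blocked by a disruption budget. The surrounding identifiers (ctx, hubClient, crpEvictionName, eventuallyDuration, eventuallyInterval) are assumed from the e2e suite setup, and the counts passed to the blocked-message format string (1 available out of 1 total) are hypothetical:

// Illustrative sketch only. StatusUpdatedActual compares condition messages
// verbatim (only LastTransitionTime is ignored), so the expected blocked
// message is rendered with the same format string the controller would use;
// the 1-of-1 placement counts are made up for this example.
blockedStatusActual := testutilseviction.StatusUpdatedActual(
	ctx, hubClient, crpEvictionName,
	&testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage},
	&testutilseviction.IsExecutedEviction{IsExecuted: false, Msg: fmt.Sprintf(condition.EvictionBlockedPDBSpecifiedMessageFmt, 1, 1)})
Eventually(blockedStatusActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to observe valid but blocked eviction status")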