diff --git a/apis/cluster/v1beta1/zz_generated.deepcopy.go b/apis/cluster/v1beta1/zz_generated.deepcopy.go index 6d06cb15b..6c25f0189 100644 --- a/apis/cluster/v1beta1/zz_generated.deepcopy.go +++ b/apis/cluster/v1beta1/zz_generated.deepcopy.go @@ -10,7 +10,7 @@ Licensed under the MIT license. package v1beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/apis/placement/v1alpha1/zz_generated.deepcopy.go b/apis/placement/v1alpha1/zz_generated.deepcopy.go index d26fe49a5..1a3f72658 100644 --- a/apis/placement/v1alpha1/zz_generated.deepcopy.go +++ b/apis/placement/v1alpha1/zz_generated.deepcopy.go @@ -10,10 +10,11 @@ Licensed under the MIT license. package v1alpha1 import ( - "go.goms.io/fleet/apis/placement/v1beta1" - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" + + "go.goms.io/fleet/apis/placement/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/apis/placement/v1beta1/zz_generated.deepcopy.go b/apis/placement/v1beta1/zz_generated.deepcopy.go index 803ac0287..497454532 100644 --- a/apis/placement/v1beta1/zz_generated.deepcopy.go +++ b/apis/placement/v1beta1/zz_generated.deepcopy.go @@ -10,7 +10,7 @@ Licensed under the MIT license. package v1beta1 import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index 0d4061551..ac4844274 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -11,7 +11,7 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go index cc3f05315..6140ca8af 100644 --- a/pkg/controllers/workgenerator/controller.go +++ b/pkg/controllers/workgenerator/controller.go @@ -18,6 +18,7 @@ import ( "go.uber.org/atomic" "golang.org/x/sync/errgroup" corev1 "k8s.io/api/core/v1" + equality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -55,6 +56,10 @@ import ( var ( // maxFailedResourcePlacementLimit indicates the max number of failed resource placements to include in the status. maxFailedResourcePlacementLimit = 100 + // maxDriftedResourcePlacementLimit indicates the max number of drifted resource placements to include in the status. + maxDriftedResourcePlacementLimit = 100 + // maxDiffedResourcePlacementLimit indicates the max number of diffed resource placements to include in the status. 
+ maxDiffedResourcePlacementLimit = 100 errResourceSnapshotNotFound = fmt.Errorf("the master resource snapshot is not found") ) @@ -147,6 +152,8 @@ func (r *Reconciler) Reconcile(ctx context.Context, req controllerruntime.Reques resourceBinding.RemoveCondition(string(i.ResourceBindingConditionType())) } resourceBinding.Status.FailedPlacements = nil + resourceBinding.Status.DriftedPlacements = nil + resourceBinding.Status.DiffedPlacements = nil if overrideSucceeded { overrideReason := condition.OverriddenSucceededReason overrideMessage := "Successfully applied the override rules on the resources" @@ -755,25 +762,58 @@ func setBindingStatus(works map[string]*fleetv1beta1.Work, resourceBinding *flee resourceBinding.SetConditions(availableCond) } resourceBinding.Status.FailedPlacements = nil + resourceBinding.Status.DiffedPlacements = nil + resourceBinding.Status.DriftedPlacements = nil // collect and set the failed resource placements to the binding if not all the works are available - if appliedCond.Status != metav1.ConditionTrue || availableCond.Status != metav1.ConditionTrue { - failedResourcePlacements := make([]fleetv1beta1.FailedResourcePlacement, 0, maxFailedResourcePlacementLimit) // preallocate the memory - for _, w := range works { - if w.DeletionTimestamp != nil { - klog.V(2).InfoS("Ignoring the deleting work", "clusterResourceBinding", bindingRef, "work", klog.KObj(w)) - continue // ignore the deleting work - } + driftedResourcePlacements := make([]fleetv1beta1.DriftedResourcePlacement, 0, maxDriftedResourcePlacementLimit) // preallocate the memory + failedResourcePlacements := make([]fleetv1beta1.FailedResourcePlacement, 0, maxFailedResourcePlacementLimit) // preallocate the memory + diffedResourcePlacements := make([]fleetv1beta1.DiffedResourcePlacement, 0, maxDiffedResourcePlacementLimit) // preallocate the memory + for _, w := range works { + if w.DeletionTimestamp != nil { + klog.V(2).InfoS("Ignoring the deleting work", "clusterResourceBinding", bindingRef, "work", klog.KObj(w)) + continue // ignore the deleting work + } + + // Failed placements (resources that cannot be applied or failed to get available) will only appear when either + // the Applied or Available conditions (on the work object) are set to False + if appliedCond.Status != metav1.ConditionTrue || availableCond.Status != metav1.ConditionTrue { failedManifests := extractFailedResourcePlacementsFromWork(w) failedResourcePlacements = append(failedResourcePlacements, failedManifests...) } - // cut the list to keep only the max limit - if len(failedResourcePlacements) > maxFailedResourcePlacementLimit { - failedResourcePlacements = failedResourcePlacements[0:maxFailedResourcePlacementLimit] + // Diffed placements can only appear when the Applied condition is set to False. + if appliedCond.Status == metav1.ConditionFalse { + diffedManifests := extractDiffedResourcePlacementsFromWork(w) + diffedResourcePlacements = append(diffedResourcePlacements, diffedManifests...) } + // Drifted placements can appear in any situation (Applied condition is True or False) + driftedManifests := extractDriftedResourcePlacementsFromWork(w) + driftedResourcePlacements = append(driftedResourcePlacements, driftedManifests...) 
+ } + // cut the list to keep only the max limit + if len(failedResourcePlacements) > maxFailedResourcePlacementLimit { + failedResourcePlacements = failedResourcePlacements[0:maxFailedResourcePlacementLimit] + } + if len(failedResourcePlacements) > 0 { resourceBinding.Status.FailedPlacements = failedResourcePlacements - if len(failedResourcePlacements) > 0 { - klog.V(2).InfoS("Populated failed manifests", "clusterResourceBinding", bindingRef, "numberOfFailedPlacements", len(failedResourcePlacements)) - } + klog.V(2).InfoS("Populated failed manifests", "clusterResourceBinding", bindingRef, "numberOfFailedPlacements", len(failedResourcePlacements)) + } + + // cut the list to keep only the max limit + if len(diffedResourcePlacements) > maxDiffedResourcePlacementLimit { + diffedResourcePlacements = diffedResourcePlacements[0:maxDiffedResourcePlacementLimit] + } + if len(diffedResourcePlacements) > 0 { + resourceBinding.Status.DiffedPlacements = diffedResourcePlacements + klog.V(2).InfoS("Populated diffed manifests", "clusterResourceBinding", bindingRef, "numberOfDiffedPlacements", len(diffedResourcePlacements)) + } + + // cut the list to keep only the max limit + if len(driftedResourcePlacements) > maxDriftedResourcePlacementLimit { + driftedResourcePlacements = driftedResourcePlacements[0:maxDriftedResourcePlacementLimit] + } + if len(driftedResourcePlacements) > 0 { + resourceBinding.Status.DriftedPlacements = driftedResourcePlacements + klog.V(2).InfoS("Populated drifted manifests", "clusterResourceBinding", bindingRef, "numberOfDriftedPlacements", len(driftedResourcePlacements)) } } @@ -955,6 +995,106 @@ func extractFailedResourcePlacementsFromWork(work *fleetv1beta1.Work) []fleetv1b return res } +// extractDriftedResourcePlacementsFromWork extracts the drifted placements from work +func extractDriftedResourcePlacementsFromWork(work *fleetv1beta1.Work) []fleetv1beta1.DriftedResourcePlacement { + // check if the work is generated by an enveloped object + envelopeType, isEnveloped := work.GetLabels()[fleetv1beta1.EnvelopeTypeLabel] + var envelopObjName, envelopObjNamespace string + if isEnveloped { + // If the work is generated by an enveloped object, it must contain those labels.
+ envelopObjName = work.GetLabels()[fleetv1beta1.EnvelopeNameLabel] + envelopObjNamespace = work.GetLabels()[fleetv1beta1.EnvelopeNamespaceLabel] + } + res := make([]fleetv1beta1.DriftedResourcePlacement, 0, len(work.Status.ManifestConditions)) + for _, manifestCondition := range work.Status.ManifestConditions { + if manifestCondition.DriftDetails == nil { + continue + } + driftedManifest := fleetv1beta1.DriftedResourcePlacement{ + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: manifestCondition.Identifier.Group, + Version: manifestCondition.Identifier.Version, + Kind: manifestCondition.Identifier.Kind, + Name: manifestCondition.Identifier.Name, + Namespace: manifestCondition.Identifier.Namespace, + }, + ObservationTime: manifestCondition.DriftDetails.ObservationTime, + TargetClusterObservedGeneration: manifestCondition.DriftDetails.ObservedInMemberClusterGeneration, + FirstDriftedObservedTime: manifestCondition.DriftDetails.FirstDriftedObservedTime, + ObservedDrifts: manifestCondition.DriftDetails.ObservedDrifts, + } + + if isEnveloped { + driftedManifest.ResourceIdentifier.Envelope = &fleetv1beta1.EnvelopeIdentifier{ + Name: envelopObjName, + Namespace: envelopObjNamespace, + Type: fleetv1beta1.EnvelopeType(envelopeType), + } + klog.V(2).InfoS("Found a drifted enveloped manifest", + "manifestName", manifestCondition.Identifier.Name, + "group", manifestCondition.Identifier.Group, + "version", manifestCondition.Identifier.Version, "kind", manifestCondition.Identifier.Kind, + "envelopeType", envelopeType, "envelopObjName", envelopObjName, "envelopObjNamespace", envelopObjNamespace) + } else { + klog.V(2).InfoS("Found a drifted manifest", + "manifestName", manifestCondition.Identifier.Name, "group", manifestCondition.Identifier.Group, + "version", manifestCondition.Identifier.Version, "kind", manifestCondition.Identifier.Kind) + } + res = append(res, driftedManifest) + } + return res +} + +// extractDiffedResourcePlacementsFromWork extracts the diffed placements from work +func extractDiffedResourcePlacementsFromWork(work *fleetv1beta1.Work) []fleetv1beta1.DiffedResourcePlacement { + // check if the work is generated by an enveloped object + envelopeType, isEnveloped := work.GetLabels()[fleetv1beta1.EnvelopeTypeLabel] + var envelopObjName, envelopObjNamespace string + if isEnveloped { + // If the work is generated by an enveloped object, it must contain those labels.
+ envelopObjName = work.GetLabels()[fleetv1beta1.EnvelopeNameLabel] + envelopObjNamespace = work.GetLabels()[fleetv1beta1.EnvelopeNamespaceLabel] + } + res := make([]fleetv1beta1.DiffedResourcePlacement, 0, len(work.Status.ManifestConditions)) + for _, manifestCondition := range work.Status.ManifestConditions { + if manifestCondition.DiffDetails == nil { + continue + } + diffedManifest := fleetv1beta1.DiffedResourcePlacement{ + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: manifestCondition.Identifier.Group, + Version: manifestCondition.Identifier.Version, + Kind: manifestCondition.Identifier.Kind, + Name: manifestCondition.Identifier.Name, + Namespace: manifestCondition.Identifier.Namespace, + }, + ObservationTime: manifestCondition.DiffDetails.ObservationTime, + TargetClusterObservedGeneration: manifestCondition.DiffDetails.ObservedInMemberClusterGeneration, + FirstDiffedObservedTime: manifestCondition.DiffDetails.FirstDiffedObservedTime, + ObservedDiffs: manifestCondition.DiffDetails.ObservedDiffs, + } + + if isEnveloped { + diffedManifest.ResourceIdentifier.Envelope = &fleetv1beta1.EnvelopeIdentifier{ + Name: envelopObjName, + Namespace: envelopObjNamespace, + Type: fleetv1beta1.EnvelopeType(envelopeType), + } + klog.V(2).InfoS("Found a diffed enveloped manifest", + "manifestName", manifestCondition.Identifier.Name, + "group", manifestCondition.Identifier.Group, + "version", manifestCondition.Identifier.Version, "kind", manifestCondition.Identifier.Kind, + "envelopeType", envelopeType, "envelopObjName", envelopObjName, "envelopObjNamespace", envelopObjNamespace) + } else { + klog.V(2).InfoS("Found a diffed manifest", + "manifestName", manifestCondition.Identifier.Name, "group", manifestCondition.Identifier.Group, + "version", manifestCondition.Identifier.Version, "kind", manifestCondition.Identifier.Kind) + } + res = append(res, diffedManifest) + } + return res +} + // SetupWithManager sets up the controller with the Manager. // It watches binding events and also update/delete events for work. func (r *Reconciler) SetupWithManager(mgr controllerruntime.Manager) error { @@ -1009,42 +1149,29 @@ func (r *Reconciler) SetupWithManager(mgr controllerruntime.Manager) error { "Failed to process an update event for work object") return } - oldAppliedCondition := meta.FindStatusCondition(oldWork.Status.Conditions, fleetv1beta1.WorkConditionTypeApplied) - newAppliedCondition := meta.FindStatusCondition(newWork.Status.Conditions, fleetv1beta1.WorkConditionTypeApplied) - oldAvailableCondition := meta.FindStatusCondition(oldWork.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable) - newAvailableCondition := meta.FindStatusCondition(newWork.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable) - // we try to filter out events, we only need to handle the updated event if the applied or available condition flip between true and false - // or the failed placements are changed. 
- if condition.EqualCondition(oldAppliedCondition, newAppliedCondition) && condition.EqualCondition(oldAvailableCondition, newAvailableCondition) { - if condition.IsConditionStatusFalse(newAppliedCondition, newWork.Generation) || condition.IsConditionStatusFalse(newAvailableCondition, newWork.Generation) { - // we need to compare the failed placement if the work is not applied or available - oldFailedPlacements := extractFailedResourcePlacementsFromWork(oldWork) - newFailedPlacements := extractFailedResourcePlacementsFromWork(newWork) - if utils.IsFailedResourcePlacementsEqual(oldFailedPlacements, newFailedPlacements) { - klog.V(2).InfoS("The failed placement list didn't change on failed work, no need to reconcile", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork)) - return - } - } else { - oldResourceSnapshot := oldWork.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel] - newResourceSnapshot := newWork.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel] - if oldResourceSnapshot == "" || newResourceSnapshot == "" { - klog.ErrorS(controller.NewUnexpectedBehaviorError(errors.New("found an invalid work without parent-resource-snapshot-index")), - "Could not find the parent resource snapshot index label", "oldWork", klog.KObj(oldWork), "oldResourceSnapshotLabelValue", oldResourceSnapshot, - "newWork", klog.KObj(newWork), "newResourceSnapshotLabelValue", newResourceSnapshot) - return - } - // There is an edge case that, the work spec is the same but from different resourceSnapshots. - // WorkGenerator will update the work because of the label changes, but the generation is the same. - // When the normal update happens, the controller will set the applied condition as false and wait - // until the work condition has been changed. - // In this edge case, we need to requeue the binding to update the binding status. - if oldResourceSnapshot == newResourceSnapshot { - klog.V(2).InfoS("The work applied or available condition stayed as true, no need to reconcile", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork)) - return - } + if !equality.Semantic.DeepEqual(oldWork.Status, newWork.Status) { + klog.V(2).InfoS("Work status has been changed", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork)) + } else { + oldResourceSnapshot := oldWork.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel] + newResourceSnapshot := newWork.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel] + if oldResourceSnapshot == "" || newResourceSnapshot == "" { + klog.ErrorS(controller.NewUnexpectedBehaviorError(errors.New("found an invalid work without parent-resource-snapshot-index")), + "Could not find the parent resource snapshot index label", "oldWork", klog.KObj(oldWork), "oldResourceSnapshotLabelValue", oldResourceSnapshot, + "newWork", klog.KObj(newWork), "newResourceSnapshotLabelValue", newResourceSnapshot) + return + } + // There is an edge case that, the work spec is the same but from different resourceSnapshots. + // WorkGenerator will update the work because of the label changes, but the generation is the same. + // When the normal update happens, the controller will set the applied condition as false and wait + // until the work condition has been changed. + // In this edge case, we need to requeue the binding to update the binding status. 
+ if oldResourceSnapshot == newResourceSnapshot { + klog.V(2).InfoS("The work applied or available condition stayed as true, no need to reconcile", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork)) + return } } + // We need to update the binding status in this case klog.V(2).InfoS("Received a work update event that we need to handle", "work", klog.KObj(newWork), "parentBindingName", parentBindingName) queue.Add(reconcile.Request{NamespacedName: types.NamespacedName{ diff --git a/pkg/controllers/workgenerator/controller_integration_test.go b/pkg/controllers/workgenerator/controller_integration_test.go index 40e60c726..1a6a61da7 100644 --- a/pkg/controllers/workgenerator/controller_integration_test.go +++ b/pkg/controllers/workgenerator/controller_integration_test.go @@ -57,6 +57,21 @@ var ( fakeFailedAvailableReason = "fakeNotAvailableReason" fakeFailedAvailableMessage = "fake not available message" + + // Define a specific time + specificTime = time.Date(2024, time.November, 19, 15, 30, 0, 0, time.UTC) + + // Define Drift Details + configmapPatchDetail = placementv1beta1.PatchDetail{ + Path: "/data", + ValueInMember: "k=1", + ValueInHub: "k=2", + } + servicePatchDetail = placementv1beta1.PatchDetail{ + Path: "/spec/ports/1/containerPort", + ValueInHub: "80", + ValueInMember: "90", + } ) const ( @@ -336,7 +351,7 @@ var _ = Describe("Test Work Generator Controller", func() { // mark the work available markWorkAvailable(&work) // check the binding status that it should be marked as available true eventually - verifyBindStatusAvail(binding, false) + verifyBindStatusAvail(binding, false, false) }) It("Should treat the unscheduled binding as bound and not remove work", func() { @@ -426,33 +441,79 @@ var _ = Describe("Test Work Generator Controller", func() { }) It("Should update the binding when the work overall condition changed", func() { - // mark the work as not applied with failed manifests - markWorkWithFailedToApplyAndNotAvailable(&work) + // mark the work as not applied with failed manifests, and diffed manifests + markWorkWithFailedToApplyAndNotAvailable(&work, true, true) // check the binding status that it should have failed placement - verifyBindStatusNotAppliedWithFailedPlacement(binding, false) // mark the work applied but still not available with failed manifests + verifyBindStatusNotAppliedWithTwoPlacements(binding, false, true, true, true) // mark the work applied but still not available with failed manifests // mark the work available directly markWorkAvailable(&work) - // check the binding status that it should be marked as available true eventually - verifyBindStatusAvail(binding, false) + // check the binding status that it should be marked as available true eventually with 2 drift placements + verifyBindStatusAvail(binding, false, true) }) It("Should update the binding when the failed placement list has changed but over all condition didn't change", func() { // mark the work as not applied with failed manifests - markWorkWithFailedToApplyAndNotAvailable(&work) + markWorkWithFailedToApplyAndNotAvailable(&work, false, false) // check the binding status that it should have failed placement - verifyBindStatusNotAppliedWithFailedPlacement(binding, false) // mark the work applied but still not available with failed manifests + verifyBindStatusNotAppliedWithTwoPlacements(binding, false, true, false, false) // mark the work applied but still not available with failed manifests // change one of the failed manifests to be applied - 
markWorkAsAppliedButNotAvailableWithFailedManifest(&work) + markWorkAsAppliedButNotAvailableWithFailedManifest(&work, false) + // check the binding status that it should be applied but not available and with two failed placements + verifyBindStatusNotAvailableWithTwoPlacements(binding, false, true, false) + // mark one of the failed manifests as available but no change in the overall condition + markOneManifestAvailable(&work, false) + // check the binding status that it should be applied but not available and with one failed placement + verifyBindStatusNotAvailableWithOnePlacement(binding, false, true, false) + // mark the work available directly + markWorkAvailable(&work) + // check the binding status that it should be marked as available true eventually + verifyBindStatusAvail(binding, false, false) + }) + + It("Should update the binding when the diffed placement list has changed but over all condition didn't change", func() { + // mark the work as not applied with failed manifests and diffed manifests + markWorkWithFailedToApplyAndNotAvailable(&work, true, false) + // check the binding status that it should have failed placements and diffed placements + verifyBindStatusNotAppliedWithTwoPlacements(binding, false, true, true, false) + // change one of the failed manifests to be applied and diff manifests to be aligned (applied is now true so no diff placements) + markWorkAsAppliedButNotAvailableWithFailedManifest(&work, false) // check the binding status that it should be applied but not available and with two failed placement - verifyBindStatusNotAvailableWithTwoFailedPlacement(binding, false) + // placement list should be changed + verifyBindStatusNotAvailableWithTwoPlacements(binding, false, true, false) // mark one of the failed manifests as available but no change in the overall condition - markOneManifestAvailable(&work) + markOneManifestAvailable(&work, false) // check the binding status that it should be applied but not available and with one failed placement - verifyBindStatusNotAvailableWithOneFailedPlacement(binding, false) + verifyBindStatusNotAvailableWithOnePlacement(binding, false, true, false) // mark the work available directly markWorkAvailable(&work) // check the binding status that it should be marked as available true eventually - verifyBindStatusAvail(binding, false) + verifyBindStatusAvail(binding, false, false) + }) + + It("Should update the binding when the drifted placement list has changed but over all condition didn't change", func() { + // mark the work as not applied with failed manifests and drifted manifests + markWorkWithFailedToApplyAndNotAvailable(&work, false, true) + // check the binding status that it should have failed placements and drifted placements + verifyBindStatusNotAppliedWithTwoPlacements(binding, false, true, false, true) + // change one of the failed manifests to be applied and drift manifest to be aligned + markWorkAsAppliedButNotAvailableWithFailedManifest(&work, true) + // check the binding status that it should be applied but not available and with two failed placements and two drifted placements + verifyBindStatusNotAvailableWithTwoPlacements(binding, false, true, true) + // mark one of the failed manifests as available and no drift placements but no change in the overall condition + markOneManifestAvailable(&work, true) + // check the binding status that it should be applied but not available and with one failed placement and one drifted placement + // placement list should be changed + verifyBindStatusNotAvailableWithOnePlacement(binding, false,
true, true) + // mark the work available directly + markWorkAvailable(&work) + // check the binding status that it should be marked as available true eventually with one drift placement + verifyBindStatusAvailableWithOnePlacement(binding, false) + // mark the work with no drift placements + markWorkWithNoDrift(&work) + // check the binding status that it should be marked as available true eventually with no drift placement + // placement list should be changed + verifyBindStatusAvail(binding, false, false) + }) It("Should continue to update the binding status even if the master resource snapshot is deleted after the work is synced", func() { @@ -469,7 +530,7 @@ var _ = Describe("Test Work Generator Controller", func() { // mark the work available which should trigger a reconcile loop and copy the status from the work to the binding even if the work has no annotation markWorkAvailable(&work) // check the binding status that it should be marked as available true eventually - verifyBindStatusAvail(binding, false) + verifyBindStatusAvail(binding, false, false) }) It("Should mark the binding as failed to sync if the master resource snapshot does not exist and the work do not sync ", func() { @@ -628,7 +689,7 @@ var _ = Describe("Test Work Generator Controller", func() { markWorkAvailable(&work) markWorkAvailable(&envWork) // check the binding status that it should be marked as available true eventually - verifyBindStatusAvail(binding, false) + verifyBindStatusAvail(binding, false, false) }) It("Should modify the enveloped work object with the same name", func() { @@ -905,7 +966,7 @@ var _ = Describe("Test Work Generator Controller", func() { verifyBindStatusAppliedNotAvailable(binding, false) markWorkAvailable(&secondWork) // check the binding status that it should be marked as applied true eventually - verifyBindStatusAvail(binding, false) + verifyBindStatusAvail(binding, false, false) }) It("Should create all the work in the target namespace after some failed to apply but eventually succeeded", func() { @@ -969,16 +1030,16 @@ var _ = Describe("Test Work Generator Controller", func() { verifyBindingStatusSyncedNotApplied(binding, false, true) // mark one work applied while the other failed markWorkApplied(&work) - markWorkWithFailedToApplyAndNotAvailable(&secondWork) + markWorkWithFailedToApplyAndNotAvailable(&secondWork, false, false) // check the binding status that it should be marked as applied true eventually - verifyBindStatusNotAppliedWithFailedPlacement(binding, false) + verifyBindStatusNotAppliedWithTwoPlacements(binding, false, true, false, false) // mark failed the work available markWorkAvailable(&secondWork) // only one work available is still just applied verifyBindStatusAppliedNotAvailable(binding, false) markWorkAvailable(&work) // check the binding status that it should be marked as applied true eventually - verifyBindStatusAvail(binding, false) + verifyBindStatusAvail(binding, false, false) }) It("Should update existing work and create more work in the target namespace when resource snapshots change", func() { @@ -1274,7 +1335,7 @@ var _ = Describe("Test Work Generator Controller", func() { // mark the work available markWorkAvailable(&work) // check the binding status that it should be marked as available true eventually - verifyBindStatusAvail(binding, true) + verifyBindStatusAvail(binding, true, false) }) It("Should treat the unscheduled binding as bound", func() { @@ -1509,7 +1570,7 @@ var _ = Describe("Test Work Generator Controller", func() { // mark the work available 
markWorkAvailable(&work) // check the binding status that it should be marked as available true eventually - verifyBindStatusAvail(binding, false) + verifyBindStatusAvail(binding, false, false) checkRolloutStartedNotUpdated(rolloutCond, binding) }) @@ -1669,7 +1730,7 @@ func verifyBindStatusAppliedNotAvailable(binding *placementv1beta1.ClusterResour }, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got)", binding.Name)) } -func verifyBindStatusAvail(binding *placementv1beta1.ClusterResourceBinding, hasOverride bool) { +func verifyBindStatusAvail(binding *placementv1beta1.ClusterResourceBinding, hasOverride, hasDriftPlacements bool) { Eventually(func() string { Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed()) overrideReason := condition.OverrideNotSpecifiedReason @@ -1710,12 +1771,47 @@ func verifyBindStatusAvail(binding *placementv1beta1.ClusterResourceBinding, has }, }, FailedPlacements: nil, + DiffedPlacements: nil, + } + if hasDriftPlacements { + wantStatus.DriftedPlacements = []placementv1beta1.DriftedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name", + Namespace: "config-namespace", + }, + ObservationTime: metav1.Time{Time: specificTime}, + FirstDriftedObservedTime: metav1.Time{Time: specificTime}, + TargetClusterObservedGeneration: 2, + ObservedDrifts: []placementv1beta1.PatchDetail{ + configmapPatchDetail, + }, + }, + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + ObservationTime: metav1.Time{Time: specificTime}, + TargetClusterObservedGeneration: 1, + FirstDriftedObservedTime: metav1.Time{Time: specificTime}, + ObservedDrifts: []placementv1beta1.PatchDetail{ + servicePatchDetail, + }, + }, + } } return cmp.Diff(wantStatus, binding.Status, cmpConditionOption) }, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got):\n", binding.Name)) } -func verifyBindStatusNotAppliedWithFailedPlacement(binding *placementv1beta1.ClusterResourceBinding, hasOverride bool) { +func verifyBindStatusNotAppliedWithTwoPlacements(binding *placementv1beta1.ClusterResourceBinding, hasOverride, hasFailedPlacements, hasDiffedPlacements, hasDriftedPlacements bool) { Eventually(func() string { Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed()) overrideReason := condition.OverrideNotSpecifiedReason @@ -1723,7 +1819,35 @@ func verifyBindStatusNotAppliedWithFailedPlacement(binding *placementv1beta1.Clu overrideReason = condition.OverriddenSucceededReason } wantStatus := placementv1beta1.ResourceBindingStatus{ - FailedPlacements: []placementv1beta1.FailedResourcePlacement{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourceBindingRolloutStarted), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: binding.GetGeneration(), + }, + { + Type: string(placementv1beta1.ResourceBindingOverridden), + Status: metav1.ConditionTrue, + Reason: overrideReason, + ObservedGeneration: binding.GetGeneration(), + }, + { + Type: string(placementv1beta1.ResourceBindingWorkSynchronized), + Status: metav1.ConditionTrue, + Reason: condition.AllWorkSyncedReason, + ObservedGeneration: binding.GetGeneration(), + }, + { + Type: string(placementv1beta1.ResourceBindingApplied), + Status: 
metav1.ConditionFalse, + Reason: condition.WorkNotAppliedReason, + ObservedGeneration: binding.GetGeneration(), + }, + }, + } + if hasFailedPlacements { + wantStatus.FailedPlacements = []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ Group: "", @@ -1754,7 +1878,88 @@ func verifyBindStatusNotAppliedWithFailedPlacement(binding *placementv1beta1.Clu Message: fakeFailedAppliedMessage, }, }, - }, + } + } + if hasDiffedPlacements { + wantStatus.DiffedPlacements = []placementv1beta1.DiffedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name", + Namespace: "config-namespace", + }, + ObservationTime: metav1.Time{Time: specificTime}, + FirstDiffedObservedTime: metav1.Time{Time: specificTime}, + TargetClusterObservedGeneration: 2, + ObservedDiffs: []placementv1beta1.PatchDetail{ + configmapPatchDetail, + }, + }, + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + ObservationTime: metav1.Time{Time: specificTime}, + TargetClusterObservedGeneration: 1, + FirstDiffedObservedTime: metav1.Time{Time: specificTime}, + ObservedDiffs: []placementv1beta1.PatchDetail{ + servicePatchDetail, + }, + }, + } + } + if hasDriftedPlacements { + wantStatus.DriftedPlacements = []placementv1beta1.DriftedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name", + Namespace: "config-namespace", + }, + ObservationTime: metav1.Time{Time: specificTime}, + FirstDriftedObservedTime: metav1.Time{Time: specificTime}, + TargetClusterObservedGeneration: 2, + ObservedDrifts: []placementv1beta1.PatchDetail{ + configmapPatchDetail, + }, + }, + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + ObservationTime: metav1.Time{Time: specificTime}, + TargetClusterObservedGeneration: 1, + FirstDriftedObservedTime: metav1.Time{Time: specificTime}, + ObservedDrifts: []placementv1beta1.PatchDetail{ + servicePatchDetail, + }, + }, + } + } + return cmp.Diff(wantStatus, binding.Status, cmpConditionOption) + }, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got)", binding.Name)) +} + +func verifyBindStatusNotAvailableWithTwoPlacements(binding *placementv1beta1.ClusterResourceBinding, hasOverride, hasFailedPlacements, hasDriftedPlacements bool) { + Eventually(func() string { + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed()) + overrideReason := condition.OverrideNotSpecifiedReason + if hasOverride { + overrideReason = condition.OverriddenSucceededReason + } + wantStatus := placementv1beta1.ResourceBindingStatus{ Conditions: []metav1.Condition{ { Type: string(placementv1beta1.ResourceBindingRolloutStarted), @@ -1776,25 +1981,20 @@ func verifyBindStatusNotAppliedWithFailedPlacement(binding *placementv1beta1.Clu }, { Type: string(placementv1beta1.ResourceBindingApplied), + Status: metav1.ConditionTrue, + Reason: condition.AllWorkAppliedReason, + ObservedGeneration: binding.GetGeneration(), + }, + { + Type: string(placementv1beta1.ResourceBindingAvailable), Status: metav1.ConditionFalse, - Reason: condition.WorkNotAppliedReason, + Reason: condition.WorkNotAvailableReason, ObservedGeneration: 
binding.GetGeneration(), }, }, } - return cmp.Diff(wantStatus, binding.Status, cmpConditionOption) - }, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got)", binding.Name)) -} - -func verifyBindStatusNotAvailableWithTwoFailedPlacement(binding *placementv1beta1.ClusterResourceBinding, hasOverride bool) { - Eventually(func() string { - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed()) - overrideReason := condition.OverrideNotSpecifiedReason - if hasOverride { - overrideReason = condition.OverriddenSucceededReason - } - wantStatus := placementv1beta1.ResourceBindingStatus{ - FailedPlacements: []placementv1beta1.FailedResourcePlacement{ + if hasFailedPlacements { + wantStatus.FailedPlacements = []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ Group: "", @@ -1825,7 +2025,54 @@ func verifyBindStatusNotAvailableWithTwoFailedPlacement(binding *placementv1beta Message: fakeFailedAvailableMessage, }, }, - }, + } + } + if hasDriftedPlacements { + wantStatus.DriftedPlacements = []placementv1beta1.DriftedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name", + Namespace: "config-namespace", + }, + ObservationTime: metav1.Time{Time: specificTime}, + FirstDriftedObservedTime: metav1.Time{Time: specificTime}, + TargetClusterObservedGeneration: 2, + ObservedDrifts: []placementv1beta1.PatchDetail{ + configmapPatchDetail, + }, + }, + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + ObservationTime: metav1.Time{Time: specificTime}, + TargetClusterObservedGeneration: 1, + FirstDriftedObservedTime: metav1.Time{Time: specificTime}, + ObservedDrifts: []placementv1beta1.PatchDetail{ + servicePatchDetail, + }, + }, + } + } + return cmp.Diff(wantStatus, binding.Status, cmpConditionOption) + }, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got)", binding.Name)) +} + +func verifyBindStatusNotAvailableWithOnePlacement(binding *placementv1beta1.ClusterResourceBinding, hasOverride, hasFailedPlacement, hasDriftedPlacement bool) { + Eventually(func() string { + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed()) + overrideReason := condition.OverrideNotSpecifiedReason + if hasOverride { + overrideReason = condition.OverriddenSucceededReason + } + wantStatus := placementv1beta1.ResourceBindingStatus{ Conditions: []metav1.Condition{ { Type: string(placementv1beta1.ResourceBindingRolloutStarted), @@ -1859,19 +2106,8 @@ func verifyBindStatusNotAvailableWithTwoFailedPlacement(binding *placementv1beta }, }, } - return cmp.Diff(wantStatus, binding.Status, cmpConditionOption) - }, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got)", binding.Name)) -} - -func verifyBindStatusNotAvailableWithOneFailedPlacement(binding *placementv1beta1.ClusterResourceBinding, hasOverride bool) { - Eventually(func() string { - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed()) - overrideReason := condition.OverrideNotSpecifiedReason - if hasOverride { - overrideReason = condition.OverriddenSucceededReason - } - wantStatus := placementv1beta1.ResourceBindingStatus{ - FailedPlacements: []placementv1beta1.FailedResourcePlacement{ + if hasFailedPlacement { + 
wantStatus.FailedPlacements = []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ Group: "", @@ -1887,7 +2123,40 @@ func verifyBindStatusNotAvailableWithOneFailedPlacement(binding *placementv1beta Message: fakeFailedAvailableMessage, }, }, - }, + } + } + + if hasDriftedPlacement { + wantStatus.DriftedPlacements = []placementv1beta1.DriftedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + ObservationTime: metav1.Time{Time: specificTime}, + FirstDriftedObservedTime: metav1.Time{Time: specificTime}, + TargetClusterObservedGeneration: 1, + ObservedDrifts: []placementv1beta1.PatchDetail{ + servicePatchDetail, + }, + }, + } + } + return cmp.Diff(wantStatus, binding.Status, cmpConditionOption) + }, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got)", binding.Name)) +} + +func verifyBindStatusAvailableWithOnePlacement(binding *placementv1beta1.ClusterResourceBinding, hasOverride bool) { + Eventually(func() string { + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed()) + overrideReason := condition.OverrideNotSpecifiedReason + if hasOverride { + overrideReason = condition.OverriddenSucceededReason + } + wantStatus := placementv1beta1.ResourceBindingStatus{ Conditions: []metav1.Condition{ { Type: string(placementv1beta1.ResourceBindingRolloutStarted), @@ -1915,11 +2184,28 @@ func verifyBindStatusNotAvailableWithOneFailedPlacement(binding *placementv1beta }, { Type: string(placementv1beta1.ResourceBindingAvailable), - Status: metav1.ConditionFalse, - Reason: condition.WorkNotAvailableReason, + Status: metav1.ConditionTrue, + Reason: condition.AllWorkAvailableReason, ObservedGeneration: binding.GetGeneration(), }, }, + DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + ObservationTime: metav1.Time{Time: specificTime}, + FirstDriftedObservedTime: metav1.Time{Time: specificTime}, + TargetClusterObservedGeneration: 1, + ObservedDrifts: []placementv1beta1.PatchDetail{ + servicePatchDetail, + }, + }, + }, } return cmp.Diff(wantStatus, binding.Status, cmpConditionOption) }, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got)", binding.Name)) @@ -2020,8 +2306,69 @@ func markWorkAvailable(work *placementv1beta1.Work) { By(fmt.Sprintf("resource work `%s` is marked as available", work.Name)) } +func markWorkWithNoDrift(work *placementv1beta1.Work) { + work.Status.ManifestConditions = []placementv1beta1.ManifestCondition{ + { + Identifier: placementv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name", + Namespace: "config-namespace", + }, + Conditions: []metav1.Condition{ + { + Type: placementv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: "fakeAppliedManifest", + Message: "fake apply manifest", + LastTransitionTime: metav1.Now(), + }, + { + Type: placementv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: "fakeAvailableManifest", + Message: "fake available manifest", + LastTransitionTime: metav1.Now(), + }, + }, + DriftDetails: nil, + }, + { + Identifier: placementv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "", 
+ Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + Conditions: []metav1.Condition{ + { + Type: placementv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: "fakeAppliedManifest", + Message: "fake apply manifest", + LastTransitionTime: metav1.Now(), + }, + { + Type: placementv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: "fakeAvailableManifest", + Message: "fake available manifest", + LastTransitionTime: metav1.Now(), + }, + }, + DriftDetails: nil, + }, + } + Expect(k8sClient.Status().Update(ctx, work)).Should(Succeed()) + By(fmt.Sprintf("resource work `%s` is marked with no drift", work.Name)) +} + // markWorkWithFailedToApplyAndNotAvailable marks the work as not applied with failedPlacement -func markWorkWithFailedToApplyAndNotAvailable(work *placementv1beta1.Work) { +func markWorkWithFailedToApplyAndNotAvailable(work *placementv1beta1.Work, hasDiffedDetails, hasDriftedDetails bool) { meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{ Status: metav1.ConditionFalse, Type: placementv1beta1.WorkConditionTypeApplied, @@ -2085,12 +2432,49 @@ func markWorkWithFailedToApplyAndNotAvailable(work *placementv1beta1.Work) { }, }, } + if hasDiffedDetails { + work.Status.ManifestConditions[0].DiffDetails = &placementv1beta1.DiffDetails{ + ObservationTime: metav1.Time{Time: specificTime}, + FirstDiffedObservedTime: metav1.Time{Time: specificTime}, + ObservedInMemberClusterGeneration: 2, + ObservedDiffs: []placementv1beta1.PatchDetail{ + configmapPatchDetail, + }, + } + work.Status.ManifestConditions[1].DiffDetails = &placementv1beta1.DiffDetails{ + ObservationTime: metav1.Time{Time: specificTime}, + FirstDiffedObservedTime: metav1.Time{Time: specificTime}, + ObservedInMemberClusterGeneration: 1, + ObservedDiffs: []placementv1beta1.PatchDetail{ + servicePatchDetail, + }, + } + } + + if hasDriftedDetails { + work.Status.ManifestConditions[0].DriftDetails = &placementv1beta1.DriftDetails{ + ObservationTime: metav1.Time{Time: specificTime}, + FirstDriftedObservedTime: metav1.Time{Time: specificTime}, + ObservedInMemberClusterGeneration: 2, + ObservedDrifts: []placementv1beta1.PatchDetail{ + configmapPatchDetail, + }, + } + work.Status.ManifestConditions[1].DriftDetails = &placementv1beta1.DriftDetails{ + ObservationTime: metav1.Time{Time: specificTime}, + FirstDriftedObservedTime: metav1.Time{Time: specificTime}, + ObservedInMemberClusterGeneration: 1, + ObservedDrifts: []placementv1beta1.PatchDetail{ + servicePatchDetail, + }, + } + } Expect(k8sClient.Status().Update(ctx, work)).Should(Succeed()) By(fmt.Sprintf("resource work `%s` is marked as available", work.Name)) } // markWorkAsAppliedButNotAvailableWithFailedManifest marks the work as not available with failedPlacement -func markWorkAsAppliedButNotAvailableWithFailedManifest(work *placementv1beta1.Work) { +func markWorkAsAppliedButNotAvailableWithFailedManifest(work *placementv1beta1.Work, hasDriftedDetails bool) { meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{ Status: metav1.ConditionTrue, Type: placementv1beta1.WorkConditionTypeApplied, @@ -2153,11 +2537,29 @@ func markWorkAsAppliedButNotAvailableWithFailedManifest(work *placementv1beta1.W }, }, } + if hasDriftedDetails { + work.Status.ManifestConditions[0].DriftDetails = &placementv1beta1.DriftDetails{ + ObservationTime: metav1.Time{Time: specificTime}, + FirstDriftedObservedTime: metav1.Time{Time: specificTime}, + ObservedInMemberClusterGeneration: 2, + 
ObservedDrifts: []placementv1beta1.PatchDetail{ + configmapPatchDetail, + }, + } + work.Status.ManifestConditions[1].DriftDetails = &placementv1beta1.DriftDetails{ + ObservationTime: metav1.Time{Time: specificTime}, + FirstDriftedObservedTime: metav1.Time{Time: specificTime}, + ObservedInMemberClusterGeneration: 1, + ObservedDrifts: []placementv1beta1.PatchDetail{ + servicePatchDetail, + }, + } + } Expect(k8sClient.Status().Update(ctx, work)).Should(Succeed()) By(fmt.Sprintf("resource work `%s` is marked as available", work.Name)) } -func markOneManifestAvailable(work *placementv1beta1.Work) { +func markOneManifestAvailable(work *placementv1beta1.Work, hasDriftedManifest bool) { work.Status.ManifestConditions = []placementv1beta1.ManifestCondition{ { Identifier: placementv1beta1.WorkResourceIdentifier{ @@ -2212,6 +2614,16 @@ func markOneManifestAvailable(work *placementv1beta1.Work) { }, }, } + if hasDriftedManifest { + work.Status.ManifestConditions[1].DriftDetails = &placementv1beta1.DriftDetails{ + ObservationTime: metav1.Time{Time: specificTime}, + FirstDriftedObservedTime: metav1.Time{Time: specificTime}, + ObservedInMemberClusterGeneration: 1, + ObservedDrifts: []placementv1beta1.PatchDetail{ + servicePatchDetail, + }, + } + } Expect(k8sClient.Status().Update(ctx, work)).Should(Succeed()) By(fmt.Sprintf("resource work `%s` is marked as available", work.Name)) } diff --git a/pkg/controllers/workgenerator/controller_test.go b/pkg/controllers/workgenerator/controller_test.go index d706e9e45..5fbcc54c7 100644 --- a/pkg/controllers/workgenerator/controller_test.go +++ b/pkg/controllers/workgenerator/controller_test.go @@ -1,10 +1,10 @@ +package workgenerator + /* Copyright (c) Microsoft Corporation. Licensed under the MIT license. */ -package workgenerator - import ( "context" "errors" @@ -712,14 +712,18 @@ func TestBuildAllWorkAvailableCondition(t *testing.T) { } func TestSetBindingStatus(t *testing.T) { + timeNow := time.Now() tests := map[string]struct { - works map[string]*fleetv1beta1.Work - maxFailedResourcePlacementLimit *int - want []fleetv1beta1.FailedResourcePlacement + works map[string]*fleetv1beta1.Work + maxFailedResourcePlacementLimit *int + wantFailedResourcePlacements []fleetv1beta1.FailedResourcePlacement + maxDriftedResourcePlacementLimit *int + wantDriftedResourcePlacements []fleetv1beta1.DriftedResourcePlacement + maxDiffedResourcePlacementLimit *int + wantDiffedResourcePlacements []fleetv1beta1.DiffedResourcePlacement }{ "NoWorks": { works: map[string]*fleetv1beta1.Work{}, - want: nil, }, "both work are available": { works: map[string]*fleetv1beta1.Work{ @@ -796,7 +800,6 @@ func TestSetBindingStatus(t *testing.T) { }, }, }, - want: nil, }, "One work has one not available and one work has one not applied": { works: map[string]*fleetv1beta1.Work{ @@ -909,7 +912,7 @@ func TestSetBindingStatus(t *testing.T) { }, }, }, - want: []fleetv1beta1.FailedResourcePlacement{ + wantFailedResourcePlacements: []fleetv1beta1.FailedResourcePlacement{ { ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ Group: "", @@ -1050,7 +1053,7 @@ func TestSetBindingStatus(t *testing.T) { }, }, maxFailedResourcePlacementLimit: ptr.To(1), - want: []fleetv1beta1.FailedResourcePlacement{ + wantFailedResourcePlacements: []fleetv1beta1.FailedResourcePlacement{ { ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ Group: "", @@ -1177,7 +1180,145 @@ func TestSetBindingStatus(t *testing.T) { }, }, }, - want: []fleetv1beta1.FailedResourcePlacement{ + wantFailedResourcePlacements: 
[]fleetv1beta1.FailedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + Condition: metav1.Condition{ + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + }, + }, + }, + }, + "exceed the maxDriftedResourcePlacementLimit": { + works: map[string]*fleetv1beta1.Work{ + "work1": { + Status: fleetv1beta1.WorkStatus{ + ManifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + }, + }, + DriftDetails: &fleetv1beta1.DriftDetails{ + ObservationTime: metav1.NewTime(timeNow), + ObservedInMemberClusterGeneration: 2, + FirstDriftedObservedTime: metav1.NewTime(timeNow.Add(-time.Hour)), + ObservedDrifts: []fleetv1beta1.PatchDetail{ + { + Path: "/spec/ports/0/port", + ValueInHub: "80", + ValueInMember: "90", + }, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + }, + }, + }, + }, + "work2": { + Status: fleetv1beta1.WorkStatus{ + ManifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name-1", + Namespace: "config-namespace", + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + }, + }, + DriftDetails: &fleetv1beta1.DriftDetails{ + ObservationTime: metav1.NewTime(timeNow), + ObservedInMemberClusterGeneration: 2, + FirstDriftedObservedTime: metav1.NewTime(timeNow.Add(-time.Second)), + ObservedDrifts: []fleetv1beta1.PatchDetail{ + { + Path: "/metadata/labels/label1", + ValueInHub: "key1", + ValueInMember: "key2", + }, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name-1", + Namespace: "svc-namespace", + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + }, + wantFailedResourcePlacements: []fleetv1beta1.FailedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + Condition: metav1.Condition{ + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + }, + }, + }, + maxDriftedResourcePlacementLimit: ptr.To(1), + wantDriftedResourcePlacements: []fleetv1beta1.DriftedResourcePlacement{ { ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ Group: "", @@ -1186,18 +1327,168 @@ func TestSetBindingStatus(t *testing.T) { Name: "svc-name", Namespace: "svc-namespace", }, + ObservationTime: metav1.NewTime(timeNow), + TargetClusterObservedGeneration: 2, + FirstDriftedObservedTime: metav1.NewTime(timeNow.Add(-time.Hour)), + ObservedDrifts: 
[]fleetv1beta1.PatchDetail{ + { + Path: "/spec/ports/0/port", + ValueInHub: "80", + ValueInMember: "90", + }, + }, + }, + }, + }, + "exceed the maxDiffedResourcePlacementLimit": { + works: map[string]*fleetv1beta1.Work{ + "work1": { + Status: fleetv1beta1.WorkStatus{ + ManifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + }, + }, + DiffDetails: &fleetv1beta1.DiffDetails{ + ObservationTime: metav1.NewTime(timeNow), + ObservedInMemberClusterGeneration: 2, + FirstDiffedObservedTime: metav1.NewTime(timeNow.Add(-time.Hour)), + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/spec/ports/1/port", + ValueInHub: "80", + ValueInMember: "90", + }, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + }, + }, + }, + }, + "work2": { + Status: fleetv1beta1.WorkStatus{ + ManifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name-1", + Namespace: "config-namespace", + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + }, + }, + DiffDetails: &fleetv1beta1.DiffDetails{ + ObservationTime: metav1.NewTime(timeNow), + ObservedInMemberClusterGeneration: 2, + FirstDiffedObservedTime: metav1.NewTime(timeNow.Add(-time.Second)), + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/metadata/labels/label1", + ValueInHub: "key1", + ValueInMember: "key2", + }, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name-1", + Namespace: "svc-namespace", + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + }, + wantFailedResourcePlacements: []fleetv1beta1.FailedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, Condition: metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, }, }, }, + maxDiffedResourcePlacementLimit: ptr.To(1), + wantDiffedResourcePlacements: []fleetv1beta1.DiffedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + ObservationTime: metav1.NewTime(timeNow), + TargetClusterObservedGeneration: 2, + FirstDiffedObservedTime: metav1.NewTime(timeNow.Add(-time.Hour)), + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/spec/ports/1/port", + ValueInHub: "80", + ValueInMember: "90", + }, + }, + }, + }, }, } originalMaxFailedResourcePlacementLimit := maxFailedResourcePlacementLimit + originalMaxDriftedResourcePlacementLimit := maxDriftedResourcePlacementLimit + originalMaxDiffedResourcePlacementLimit := maxDiffedResourcePlacementLimit 
defer func() { maxFailedResourcePlacementLimit = originalMaxFailedResourcePlacementLimit + maxDriftedResourcePlacementLimit = originalMaxDriftedResourcePlacementLimit + maxDiffedResourcePlacementLimit = originalMaxDiffedResourcePlacementLimit }() for name, tt := range tests { t.Run(name, func(t *testing.T) { @@ -1207,18 +1498,30 @@ func TestSetBindingStatus(t *testing.T) { maxFailedResourcePlacementLimit = originalMaxFailedResourcePlacementLimit } + if tt.maxDriftedResourcePlacementLimit != nil { + maxDriftedResourcePlacementLimit = *tt.maxDriftedResourcePlacementLimit + } else { + maxDriftedResourcePlacementLimit = originalMaxDriftedResourcePlacementLimit + } + + if tt.maxDiffedResourcePlacementLimit != nil { + maxDiffedResourcePlacementLimit = *tt.maxDiffedResourcePlacementLimit + } else { + maxDiffedResourcePlacementLimit = originalMaxDiffedResourcePlacementLimit + } + binding := &fleetv1beta1.ClusterResourceBinding{} setBindingStatus(tt.works, binding) got := binding.Status.FailedPlacements - // setBindingStatus is using map to populate the failedResourcePlacement. + // setBindingStatus is using map to populate the placements. // There is no default order in traversing the map. - // When the result of failedResourcePlacement exceeds the limit, the result will be truncated and cannot be + // When the result of Placements exceeds the limit, the result will be truncated and cannot be // guaranteed. - if maxFailedResourcePlacementLimit == len(tt.want) { + if maxFailedResourcePlacementLimit == len(tt.wantFailedResourcePlacements) { opt := cmp.Comparer(func(x, y fleetv1beta1.FailedResourcePlacement) bool { return x.Condition.Status == y.Condition.Status // condition should be set as false }) - if diff := cmp.Diff(got, tt.want, opt); diff != "" { + if diff := cmp.Diff(got, tt.wantFailedResourcePlacements, opt); diff != "" { t.Errorf("setBindingStatus got FailedPlacements mismatch (-got +want):\n%s", diff) } return @@ -1235,9 +1538,65 @@ func TestSetBindingStatus(t *testing.T) { return i.Name < j.Name }), } - if diff := cmp.Diff(got, tt.want, statusCmpOptions...); diff != "" { + if diff := cmp.Diff(got, tt.wantFailedResourcePlacements, statusCmpOptions...); diff != "" { t.Errorf("setBindingStatus got FailedPlacements mismatch (-got +want):\n%s", diff) } + + gotDrifted := binding.Status.DriftedPlacements + if maxDriftedResourcePlacementLimit == len(tt.wantDriftedResourcePlacements) { + opt := []cmp.Option{ + cmpopts.SortSlices(utils.LessFuncDriftedResourcePlacements), + cmp.Comparer(func(t1, t2 metav1.Time) bool { + if t1.Time.IsZero() || t2.Time.IsZero() { + return true // treat them as equal + } + if t1.Time.After(t2.Time) { + t1, t2 = t2, t1 // ensure t1 is always before t2 + } + // we're within the margin (10s) if x + margin >= y + return !t1.Time.Add(10 * time.Second).Before(t2.Time) + }), + } + if diff := cmp.Diff(gotDrifted, tt.wantDriftedResourcePlacements, opt...); diff != "" { + t.Errorf("setBindingStatus got DriftedPlacements mismatch (-got +want):\n%s", diff) + } + return + } + + resourceCmpOptions := []cmp.Option{ + cmpopts.SortSlices(utils.LessFuncDriftedResourcePlacements), + } + if diff := cmp.Diff(gotDrifted, tt.wantDriftedResourcePlacements, resourceCmpOptions...); diff != "" { + t.Errorf("setBindingStatus got DriftedPlacements mismatch (-got +want):\n%s", diff) + } + + gotDiffed := binding.Status.DiffedPlacements + if maxDiffedResourcePlacementLimit == len(tt.wantDiffedResourcePlacements) { + opt := []cmp.Option{ + cmpopts.SortSlices(utils.LessFuncDiffedResourcePlacements), + 
cmp.Comparer(func(t1, t2 metav1.Time) bool { + if t1.Time.IsZero() || t2.Time.IsZero() { + return true // treat them as equal + } + if t1.Time.After(t2.Time) { + t1, t2 = t2, t1 // ensure t1 is always before t2 + } + // we're within the margin (10s) if x + margin >= y + return !t1.Time.Add(10 * time.Second).Before(t2.Time) + }), + } + if diff := cmp.Diff(gotDiffed, tt.wantDiffedResourcePlacements, opt...); diff != "" { + t.Errorf("setBindingStatus got DiffedPlacements mismatch (-got +want):\n%s", diff) + } + return + } + + resourceCmpOptions = []cmp.Option{ + cmpopts.SortSlices(utils.LessFuncDiffedResourcePlacements), + } + if diff := cmp.Diff(gotDiffed, tt.wantDiffedResourcePlacements, resourceCmpOptions...); diff != "" { + t.Errorf("setBindingStatus got DiffedPlacements mismatch (-got +want):\n%s", diff) + } }) } } @@ -1716,6 +2075,550 @@ func TestExtractFailedResourcePlacementsFromWork(t *testing.T) { } } +func TestExtractDriftedResourcePlacementsFromWork(t *testing.T) { + var options = []cmp.Option{ + cmpopts.SortSlices(func(s1, s2 string) bool { + return s1 < s2 + }), + cmpopts.SortSlices(func(n1, n2 fleetv1beta1.NamespacedName) bool { + if n1.Namespace == n2.Namespace { + return n1.Name < n2.Name + } + return n1.Namespace < n2.Namespace + }), + cmpopts.SortSlices(func(f1, f2 fleetv1beta1.DriftedResourcePlacement) bool { + return f1.ResourceIdentifier.Kind < f2.ResourceIdentifier.Kind + }), + cmp.Comparer(func(t1, t2 metav1.Time) bool { + if t1.Time.IsZero() || t2.Time.IsZero() { + return true // treat them as equal + } + if t1.Time.After(t2.Time) { + t1, t2 = t2, t1 // ensure t1 is always before t2 + } + // we're within the margin (10s) if x + margin >= y + return !t1.Time.Add(10 * time.Second).Before(t2.Time) + }), + } + timeNow := time.Now() + workGeneration := int64(12) + tests := []struct { + name string + work fleetv1beta1.Work + want []fleetv1beta1.DriftedResourcePlacement + }{ + { + name: "work with drifted details", + work: fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Generation: workGeneration, + }, + Status: fleetv1beta1.WorkStatus{ + ManifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name", + Namespace: "config-namespace", + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + }, + }, + DriftDetails: &fleetv1beta1.DriftDetails{ + ObservationTime: metav1.NewTime(timeNow), + ObservedInMemberClusterGeneration: 12, + FirstDriftedObservedTime: metav1.NewTime(timeNow.Add(-time.Hour)), + ObservedDrifts: []fleetv1beta1.PatchDetail{ + { + Path: "/spec/ports/1/containerPort", + ValueInHub: "80", + ValueInMember: "90", + }, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + ObservedGeneration: workGeneration, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + 
ObservedGeneration: workGeneration, + }, + }, + }, + }, + want: []fleetv1beta1.DriftedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + ObservationTime: metav1.NewTime(timeNow), + TargetClusterObservedGeneration: 12, + FirstDriftedObservedTime: metav1.NewTime(timeNow.Add(-time.Hour)), + ObservedDrifts: []fleetv1beta1.PatchDetail{ + { + Path: "/spec/ports/1/containerPort", + ValueInHub: "80", + ValueInMember: "90", + }, + }, + }, + }, + }, + { + name: "work with no drift details", + work: fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Generation: workGeneration, + }, + Status: fleetv1beta1.WorkStatus{ + ManifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + ObservedGeneration: workGeneration, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + ObservedGeneration: workGeneration, + }, + }, + }, + }, + want: []fleetv1beta1.DriftedResourcePlacement{}, + }, + { + name: "work with enveloped object drifted details", + work: fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Generation: workGeneration, + Labels: map[string]string{ + fleetv1beta1.EnvelopeNameLabel: "test-env", + fleetv1beta1.EnvelopeNamespaceLabel: "test-env-ns", + fleetv1beta1.EnvelopeTypeLabel: "pod", + }, + }, + Status: fleetv1beta1.WorkStatus{ + ManifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name", + Namespace: "config-namespace", + }, + DriftDetails: &fleetv1beta1.DriftDetails{ + ObservationTime: metav1.NewTime(timeNow), + ObservedInMemberClusterGeneration: 12, + FirstDriftedObservedTime: metav1.NewTime(timeNow.Add(-time.Hour)), + ObservedDrifts: []fleetv1beta1.PatchDetail{ + { + Path: "/spec/containers/0/image", + ValueInHub: "nginx:1.19", + ValueInMember: "nginx:1.20", + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + ObservedGeneration: workGeneration, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + ObservedGeneration: workGeneration, + }, + }, + }, + }, + want: []fleetv1beta1.DriftedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name", + Namespace: "config-namespace", + Envelope: &fleetv1beta1.EnvelopeIdentifier{ + Name: "test-env", + Namespace: "test-env-ns", + Type: "pod", + }, + }, + ObservationTime: metav1.NewTime(timeNow), + TargetClusterObservedGeneration: 12, + FirstDriftedObservedTime: metav1.NewTime(timeNow.Add(-time.Hour)), + 
ObservedDrifts: []fleetv1beta1.PatchDetail{ + { + Path: "/spec/containers/0/image", + ValueInHub: "nginx:1.19", + ValueInMember: "nginx:1.20", + }, + }, + }, + }, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := extractDriftedResourcePlacementsFromWork(&tc.work) + if diff := cmp.Diff(tc.want, got, options...); diff != "" { + t.Errorf("extractDriftedResourcePlacementsFromWork() status mismatch (-want, +got):\n%s", diff) + } + }) + } +} + +func TestExtractDiffedResourcePlacementsFromWork(t *testing.T) { + var options = []cmp.Option{ + cmpopts.SortSlices(func(s1, s2 string) bool { + return s1 < s2 + }), + cmpopts.SortSlices(func(n1, n2 fleetv1beta1.NamespacedName) bool { + if n1.Namespace == n2.Namespace { + return n1.Name < n2.Name + } + return n1.Namespace < n2.Namespace + }), + cmpopts.SortSlices(func(f1, f2 fleetv1beta1.DiffedResourcePlacement) bool { + return f1.ResourceIdentifier.Kind < f2.ResourceIdentifier.Kind + }), + cmp.Comparer(func(t1, t2 metav1.Time) bool { + if t1.Time.IsZero() || t2.Time.IsZero() { + return true // treat them as equal + } + if t1.Time.After(t2.Time) { + t1, t2 = t2, t1 // ensure t1 is always before t2 + } + // we're within the margin (10s) if x + margin >= y + return !t1.Time.Add(10 * time.Second).Before(t2.Time) + }), + } + timeNow := time.Now() + workGeneration := int64(12) + tests := []struct { + name string + work fleetv1beta1.Work + want []fleetv1beta1.DiffedResourcePlacement + }{ + { + name: "work with diffed details", + work: fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Generation: workGeneration, + }, + Status: fleetv1beta1.WorkStatus{ + ManifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name", + Namespace: "config-namespace", + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + }, + }, + DiffDetails: &fleetv1beta1.DiffDetails{ + ObservationTime: metav1.NewTime(timeNow), + ObservedInMemberClusterGeneration: 12, + FirstDiffedObservedTime: metav1.NewTime(timeNow.Add(-time.Hour)), + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/spec/ports/1/containerPort", + ValueInHub: "80", + ValueInMember: "90", + }, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + ObservedGeneration: workGeneration, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + ObservedGeneration: workGeneration, + }, + }, + }, + }, + want: []fleetv1beta1.DiffedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + ObservationTime: metav1.NewTime(timeNow), + TargetClusterObservedGeneration: 12, + FirstDiffedObservedTime: metav1.NewTime(timeNow.Add(-time.Hour)), + ObservedDiffs: 
[]fleetv1beta1.PatchDetail{ + { + Path: "/spec/ports/1/containerPort", + ValueInHub: "80", + ValueInMember: "90", + }, + }, + }, + }, + }, + { + name: "work with no diff details", + work: fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Generation: workGeneration, + }, + Status: fleetv1beta1.WorkStatus{ + ManifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + ObservedGeneration: workGeneration, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + ObservedGeneration: workGeneration, + }, + }, + }, + }, + want: []fleetv1beta1.DiffedResourcePlacement{}, + }, + { + name: "work with enveloped object diffed details", + work: fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Generation: workGeneration, + Labels: map[string]string{ + fleetv1beta1.EnvelopeNameLabel: "test-env", + fleetv1beta1.EnvelopeNamespaceLabel: "test-env-ns", + fleetv1beta1.EnvelopeTypeLabel: "pod", + }, + }, + Status: fleetv1beta1.WorkStatus{ + ManifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name", + Namespace: "config-namespace", + }, + DiffDetails: &fleetv1beta1.DiffDetails{ + ObservationTime: metav1.NewTime(timeNow), + ObservedInMemberClusterGeneration: 12, + FirstDiffedObservedTime: metav1.NewTime(timeNow.Add(-time.Hour)), + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/spec/containers/0/image", + ValueInHub: "nginx:1.19", + ValueInMember: "nginx:1.20", + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + ObservedGeneration: workGeneration, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + ObservedGeneration: workGeneration, + }, + }, + }, + }, + want: []fleetv1beta1.DiffedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name", + Namespace: "config-namespace", + Envelope: &fleetv1beta1.EnvelopeIdentifier{ + Name: "test-env", + Namespace: "test-env-ns", + Type: "pod", + }, + }, + ObservationTime: metav1.NewTime(timeNow), + TargetClusterObservedGeneration: 12, + FirstDiffedObservedTime: metav1.NewTime(timeNow.Add(-time.Hour)), + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/spec/containers/0/image", + ValueInHub: "nginx:1.19", + ValueInMember: "nginx:1.20", + }, + }, + }, + }, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := extractDiffedResourcePlacementsFromWork(&tc.work) + if diff := cmp.Diff(tc.want, got, options...); diff != "" { + t.Errorf("extractDiffedResourcePlacementsFromWork() status mismatch (-want, +got):\n%s", diff) + } 
+        })
+    }
+}
+
 func TestUpdateBindingStatusWithRetry(t *testing.T) {
     lastTransitionTime := metav1.NewTime(time.Now())
     tests := []struct {
diff --git a/pkg/utils/common.go b/pkg/utils/common.go
index bf8be3b60..cb6f80ceb 100644
--- a/pkg/utils/common.go
+++ b/pkg/utils/common.go
@@ -516,6 +516,17 @@ var LessFuncResourceIdentifier = func(a, b placementv1beta1.ResourceIdentifier)
     return aStr < bStr
 }

+// LessFuncPatchDetail is a less function for sorting patch details
+var LessFuncPatchDetail = func(a, b placementv1beta1.PatchDetail) bool {
+    if a.Path != b.Path {
+        return a.Path < b.Path
+    }
+    if a.ValueInMember != b.ValueInMember {
+        return a.ValueInMember < b.ValueInMember
+    }
+    return a.ValueInHub < b.ValueInHub
+}
+
 // LessFuncFailedResourcePlacements is a less function for sorting failed resource placements
 var LessFuncFailedResourcePlacements = func(a, b placementv1beta1.FailedResourcePlacement) bool {
     var aStr, bStr string
@@ -556,6 +567,40 @@ func IsFailedResourcePlacementsEqual(oldFailedResourcePlacements, newFailedResou
     return true
 }

+// LessFuncDriftedResourcePlacements is a less function for sorting drifted resource placements
+var LessFuncDriftedResourcePlacements = func(a, b placementv1beta1.DriftedResourcePlacement) bool {
+    var aStr, bStr string
+    if a.Envelope != nil {
+        aStr = fmt.Sprintf(ResourceIdentifierWithEnvelopeIdentifierStringFormat, a.Group, a.Version, a.Kind, a.Namespace, a.Name, a.Envelope.Type, a.Envelope.Namespace, a.Envelope.Name)
+    } else {
+        aStr = fmt.Sprintf(ResourceIdentifierStringFormat, a.Group, a.Version, a.Kind, a.Namespace, a.Name)
+    }
+    if b.Envelope != nil {
+        bStr = fmt.Sprintf(ResourceIdentifierWithEnvelopeIdentifierStringFormat, b.Group, b.Version, b.Kind, b.Namespace, b.Name, b.Envelope.Type, b.Envelope.Namespace, b.Envelope.Name)
+    } else {
+        bStr = fmt.Sprintf(ResourceIdentifierStringFormat, b.Group, b.Version, b.Kind, b.Namespace, b.Name)
+
+    }
+    return aStr < bStr
+}
+
+// LessFuncDiffedResourcePlacements is a less function for sorting diffed resource placements
+var LessFuncDiffedResourcePlacements = func(a, b placementv1beta1.DiffedResourcePlacement) bool {
+    var aStr, bStr string
+    if a.Envelope != nil {
+        aStr = fmt.Sprintf(ResourceIdentifierWithEnvelopeIdentifierStringFormat, a.Group, a.Version, a.Kind, a.Namespace, a.Name, a.Envelope.Type, a.Envelope.Namespace, a.Envelope.Name)
+    } else {
+        aStr = fmt.Sprintf(ResourceIdentifierStringFormat, a.Group, a.Version, a.Kind, a.Namespace, a.Name)
+    }
+    if b.Envelope != nil {
+        bStr = fmt.Sprintf(ResourceIdentifierWithEnvelopeIdentifierStringFormat, b.Group, b.Version, b.Kind, b.Namespace, b.Name, b.Envelope.Type, b.Envelope.Namespace, b.Envelope.Name)
+    } else {
+        bStr = fmt.Sprintf(ResourceIdentifierStringFormat, b.Group, b.Version, b.Kind, b.Namespace, b.Name)
+
+    }
+    return aStr < bStr
+}
+
 // IsFleetAnnotationPresent returns true if a key with fleet prefix is present in the annotations map.
 func IsFleetAnnotationPresent(annotations map[string]string) bool {
     for k := range annotations {
diff --git a/test/apis/v1alpha1/zz_generated.deepcopy.go b/test/apis/v1alpha1/zz_generated.deepcopy.go
index 0b5d2e30b..ef7e4433a 100644
--- a/test/apis/v1alpha1/zz_generated.deepcopy.go
+++ b/test/apis/v1alpha1/zz_generated.deepcopy.go
@@ -10,7 +10,7 @@ Licensed under the MIT license.
 package v1alpha1

 import (
-    "k8s.io/apimachinery/pkg/apis/meta/v1"
+    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     runtime "k8s.io/apimachinery/pkg/runtime"
 )
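A side note on the comparison options used above: the 10-second metav1.Time comparer is written inline four times across these tests (twice in TestSetBindingStatus and once in each of the new extract tests). A follow-up could deduplicate it with a shared option along the lines of the sketch below; this is an illustration only, and the TimeWithinToleranceComparer name and its placement in pkg/utils are assumptions, not part of this change.

// Sketch only (assumed helper, not in this diff): a reusable cmp.Option that treats
// two metav1.Time values as equal when either is zero or they are within the given
// tolerance, mirroring the inline comparers used in the tests above.
package utils

import (
    "time"

    "github.com/google/go-cmp/cmp"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TimeWithinToleranceComparer returns a cmp.Option for metav1.Time values: zero
// timestamps always compare as equal, and non-zero timestamps compare as equal
// when they are no more than tolerance apart.
func TimeWithinToleranceComparer(tolerance time.Duration) cmp.Option {
    return cmp.Comparer(func(t1, t2 metav1.Time) bool {
        if t1.Time.IsZero() || t2.Time.IsZero() {
            return true // treat unset timestamps as equal
        }
        if t1.Time.After(t2.Time) {
            t1, t2 = t2, t1 // ensure t1 is the earlier timestamp
        }
        // within tolerance if t1 + tolerance >= t2
        return !t1.Time.Add(tolerance).Before(t2.Time)
    })
}

With such a helper, the drifted/diffed assertions could build their options as, for example, []cmp.Option{cmpopts.SortSlices(utils.LessFuncDriftedResourcePlacements), utils.TimeWithinToleranceComparer(10 * time.Second)}. The LessFuncPatchDetail function added in pkg/utils/common.go can likewise be passed to cmpopts.SortSlices wherever the order of ObservedDrifts or ObservedDiffs entries should not affect a comparison.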