diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go
index 57ec328c7..58b6285cc 100644
--- a/pkg/controllers/workgenerator/controller.go
+++ b/pkg/controllers/workgenerator/controller.go
@@ -15,6 +15,8 @@ import (
     "strings"
     "time"

+    "github.com/google/go-cmp/cmp"
+    "github.com/google/go-cmp/cmp/cmpopts"
     "go.uber.org/atomic"
     "golang.org/x/sync/errgroup"
     corev1 "k8s.io/api/core/v1"
@@ -1148,51 +1150,39 @@ func (r *Reconciler) SetupWithManager(mgr controllerruntime.Manager) error {
             "Failed to process an update event for work object")
             return
         }
-        oldAppliedCondition := meta.FindStatusCondition(oldWork.Status.Conditions, fleetv1beta1.WorkConditionTypeApplied)
-        newAppliedCondition := meta.FindStatusCondition(newWork.Status.Conditions, fleetv1beta1.WorkConditionTypeApplied)
-        oldAvailableCondition := meta.FindStatusCondition(oldWork.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable)
-        newAvailableCondition := meta.FindStatusCondition(newWork.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable)
-        // we try to filter out events, we only need to handle the updated event if the applied or available condition flip between true and false
-        // or the failed/diffed/drifted placements are changed.
-        if condition.EqualCondition(oldAppliedCondition, newAppliedCondition) && condition.EqualCondition(oldAvailableCondition, newAvailableCondition) {
-            oldDriftedPlacements := extractDriftedResourcePlacementsFromWork(oldWork)
-            newDriftedPlacements := extractDriftedResourcePlacementsFromWork(newWork)
-            driftsEqual := utils.IsDriftedResourcePlacementsEqual(oldDriftedPlacements, newDriftedPlacements)
-            if condition.IsConditionStatusFalse(newAppliedCondition, newWork.Generation) || condition.IsConditionStatusFalse(newAvailableCondition, newWork.Generation) {
-                diffsEqual := true
-                if condition.IsConditionStatusFalse(newAppliedCondition, newWork.Generation) {
-                    oldDiffedPlacements := extractDiffedResourcePlacementsFromWork(oldWork)
-                    newDiffedPlacements := extractDiffedResourcePlacementsFromWork(newWork)
-                    diffsEqual = utils.IsDiffedResourcePlacementsEqual(oldDiffedPlacements, newDiffedPlacements)
-                }
-                // we need to compare the failed placement if the work is not applied or available
-                oldFailedPlacements := extractFailedResourcePlacementsFromWork(oldWork)
-                newFailedPlacements := extractFailedResourcePlacementsFromWork(newWork)
-                if driftsEqual && diffsEqual && utils.IsFailedResourcePlacementsEqual(oldFailedPlacements, newFailedPlacements) {
-                    klog.V(2).InfoS("The placement lists didn't change on failed work, no need to reconcile", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork))
-                    return
-                }
-            } else {
-                oldResourceSnapshot := oldWork.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel]
-                newResourceSnapshot := newWork.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel]
-                if oldResourceSnapshot == "" || newResourceSnapshot == "" {
-                    klog.ErrorS(controller.NewUnexpectedBehaviorError(errors.New("found an invalid work without parent-resource-snapshot-index")),
-                        "Could not find the parent resource snapshot index label", "oldWork", klog.KObj(oldWork), "oldResourceSnapshotLabelValue", oldResourceSnapshot,
-                        "newWork", klog.KObj(newWork), "newResourceSnapshotLabelValue", newResourceSnapshot)
-                    return
-                }
-                // There is an edge case that, the work spec is the same but from different resourceSnapshots.
-                // WorkGenerator will update the work because of the label changes, but the generation is the same.
-                // When the normal update happens, the controller will set the applied condition as false and wait
-                // until the work condition has been changed.
-                // In this edge case, we need to requeue the binding to update the binding status.
-                if oldResourceSnapshot == newResourceSnapshot && driftsEqual {
-                    klog.V(2).InfoS("The work applied or available condition stayed as true, no need to reconcile", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork))
-                    return
-                }
+        lessFuncCondition := func(a, b metav1.Condition) bool {
+            return a.Type < b.Type
+        }
+        workStatusCmpOptions := cmp.Options{
+            cmpopts.SortSlices(lessFuncCondition),
+            cmpopts.SortSlices(utils.LessFuncResourceIdentifier),
+            cmpopts.SortSlices(utils.LessFuncPatchDetail),
+            utils.IgnoreConditionLTTAndMessageFields,
+            cmpopts.EquateEmpty(),
+        }
+        if diff := cmp.Diff(oldWork.Status, newWork.Status, workStatusCmpOptions); diff != "" {
+            klog.V(2).InfoS("Work status has changed", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork))
+        } else {
+            oldResourceSnapshot := oldWork.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel]
+            newResourceSnapshot := newWork.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel]
+            if oldResourceSnapshot == "" || newResourceSnapshot == "" {
+                klog.ErrorS(controller.NewUnexpectedBehaviorError(errors.New("found an invalid work without parent-resource-snapshot-index")),
+                    "Could not find the parent resource snapshot index label", "oldWork", klog.KObj(oldWork), "oldResourceSnapshotLabelValue", oldResourceSnapshot,
+                    "newWork", klog.KObj(newWork), "newResourceSnapshotLabelValue", newResourceSnapshot)
+                return
+            }
+            // There is an edge case where the work spec is the same but comes from a different resourceSnapshot.
+            // The WorkGenerator updates the work because of the label change, but the generation stays the same.
+            // On a normal update, the controller sets the applied condition to false and waits
+            // until the work condition has changed.
+            // In this edge case, we need to requeue the binding to update the binding status.
+            if oldResourceSnapshot == newResourceSnapshot {
+                klog.V(2).InfoS("The work status and the parent resource snapshot stayed the same, no need to reconcile", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork))
+                return
             }
         }
+        // We need to update the binding status in this case
         klog.V(2).InfoS("Received a work update event that we need to handle", "work", klog.KObj(newWork), "parentBindingName", parentBindingName)
         queue.Add(reconcile.Request{NamespacedName: types.NamespacedName{
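Aside: a minimal, self-contained sketch of the comparison semantics the new workStatusCmpOptions rely on. It assumes utils.IgnoreConditionLTTAndMessageFields behaves roughly like cmpopts.IgnoreFields over LastTransitionTime and Message (that helper's definition is not shown in this diff), and it uses plain metav1.Condition slices rather than the full Work status:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Two condition lists that differ only in ordering, Message, and
	// LastTransitionTime; with the options below they compare equal,
	// so an event filter built on them would not requeue the binding.
	oldConds := []metav1.Condition{
		{Type: "Available", Status: metav1.ConditionTrue, Reason: "ok", Message: "all good"},
		{Type: "Applied", Status: metav1.ConditionTrue, Reason: "ok", Message: "applied"},
	}
	newConds := []metav1.Condition{
		{Type: "Applied", Status: metav1.ConditionTrue, Reason: "ok", Message: "applied (retry)", LastTransitionTime: metav1.Now()},
		{Type: "Available", Status: metav1.ConditionTrue, Reason: "ok", Message: "all good"},
	}
	opts := cmp.Options{
		// Sort conditions by type so ordering differences do not register as a change.
		cmpopts.SortSlices(func(a, b metav1.Condition) bool { return a.Type < b.Type }),
		// Hypothetical stand-in for utils.IgnoreConditionLTTAndMessageFields.
		cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "Message"),
		// A nil slice and an empty slice are considered equal.
		cmpopts.EquateEmpty(),
	}
	fmt.Printf("equal: %v\n", cmp.Equal(oldConds, newConds, opts)) // equal: true
}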
diff --git a/pkg/controllers/workgenerator/controller_integration_test.go b/pkg/controllers/workgenerator/controller_integration_test.go
index 8b6a10f29..a54db9737 100644
--- a/pkg/controllers/workgenerator/controller_integration_test.go
+++ b/pkg/controllers/workgenerator/controller_integration_test.go
@@ -451,7 +451,7 @@ var _ = Describe("Test Work Generator Controller", func() {
             // mark one of the failed manifests as available but no change in the overall condition
             markOneManifestAvailable(&work, false)
             // check the binding status that it should be applied but not available and with one failed placement
-            verifyBindStatusNotAvailableWithOnePlacement(binding, false, true)
+            verifyBindStatusNotAvailableWithOnePlacement(binding, false, true, false)
             // mark the work available directly
             markWorkAvailable(&work)
             // check the binding status that it should be marked as available true eventually
@@ -466,11 +466,12 @@ var _ = Describe("Test Work Generator Controller", func() {
             // change one of the failed manifests to be applied and diff manifests to be aligned (applied is now true so no diff placements)
             markWorkAsAppliedButNotAvailableWithFailedManifest(&work, false)
             // check the binding status that it should be applied but not available and with two failed placement
+            // the placement list should change
             verifyBindStatusNotAvailableWithTwoPlacements(binding, false, true, false)
             // mark one of the failed manifests as available but no change in the overall condition
             markOneManifestAvailable(&work, false)
             // check the binding status that it should be applied but not available and with one failed placement
-            verifyBindStatusNotAvailableWithOnePlacement(binding, false, true)
+            verifyBindStatusNotAvailableWithOnePlacement(binding, false, true, false)
             // mark the work available directly
             markWorkAvailable(&work)
             // check the binding status that it should be marked as available true eventually
@@ -486,10 +487,21 @@ var _ = Describe("Test Work Generator Controller", func() {
             markWorkAsAppliedButNotAvailableWithFailedManifest(&work, true)
             // check the binding status that it should be applied but not available and with two failed placement and 2 drifted placement
             verifyBindStatusNotAvailableWithTwoPlacements(binding, false, true, true)
+            // mark one of the failed manifests as available with no drifted placements, but no change in the overall condition
+            markOneManifestAvailable(&work, true)
+            // check the binding status that it should be applied but not available and with one failed placement and one drifted placement
+            // the placement list should change
+            verifyBindStatusNotAvailableWithOnePlacement(binding, false, true, true)
             // mark the work available directly
             markWorkAvailable(&work)
-            // check the binding status that it should be marked as available true eventually with 2 drift placements
-            verifyBindStatusAvail(binding, false, true)
+            // check the binding status that it should be marked as available true eventually with one drift placement
+            verifyBindStatusAvailableWithOnePlacement(binding, false)
+            // mark the work with no drift placements
+            markWorkWithNoDrift(&work)
+            // check the binding status that it should be marked as available true eventually with no drift placement
+            // the placement list should change
+            verifyBindStatusAvail(binding, false, false)
+        })

         It("Should continue to update the binding status even if the master resource snapshot is deleted after the work is synced", func() {
@@ -2073,7 +2085,7 @@ func verifyBindStatusNotAvailableWithTwoPlacements(binding *placementv1beta1.Clu
     }, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got)", binding.Name))
 }

-func verifyBindStatusNotAvailableWithOnePlacement(binding *placementv1beta1.ClusterResourceBinding, hasOverride, hasFailedPlacement bool) {
+func verifyBindStatusNotAvailableWithOnePlacement(binding *placementv1beta1.ClusterResourceBinding, hasOverride, hasFailedPlacement, hasDriftedPlacement bool) {
     Eventually(func() string {
         Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed())
         overrideReason := condition.OverrideNotSpecifiedReason
@@ -2133,6 +2145,96 @@ func verifyBindStatusNotAvailableWithOnePlacement(binding *placementv1beta1.Clus
                },
            }
        }
+
+        if hasDriftedPlacement {
+            wantStatus.DriftedPlacements = []placementv1beta1.DriftedResourcePlacement{
+                {
+                    ResourceIdentifier: placementv1beta1.ResourceIdentifier{
+                        Group:     "",
+                        Version:   "v1",
+                        Kind:      "Service",
+                        Name:      "svc-name",
+                        Namespace: "svc-namespace",
+                    },
+                    ObservationTime:                 metav1.Time{Time: specificTime},
+                    FirstDriftedObservedTime:        metav1.Time{Time: specificTime},
+                    TargetClusterObservedGeneration: 1,
+                    ObservedDrifts: []placementv1beta1.PatchDetail{
+                        {
+                            Path:          "/spec/ports/1/containerPort",
+                            ValueInHub:    "80",
+                            ValueInMember: "90",
+                        },
+                    },
+                },
+            }
+        }
+        return cmp.Diff(wantStatus, binding.Status, cmpConditionOption)
+    }, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got)", binding.Name))
+}
+
+func verifyBindStatusAvailableWithOnePlacement(binding *placementv1beta1.ClusterResourceBinding, hasOverride bool) {
+    Eventually(func() string {
+        Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed())
+        overrideReason := condition.OverrideNotSpecifiedReason
+        if hasOverride {
+            overrideReason = condition.OverriddenSucceededReason
+        }
+        wantStatus := placementv1beta1.ResourceBindingStatus{
+            Conditions: []metav1.Condition{
+                {
+                    Type:               string(placementv1beta1.ResourceBindingRolloutStarted),
+                    Status:             metav1.ConditionTrue,
+                    Reason:             condition.RolloutStartedReason,
+                    ObservedGeneration: binding.GetGeneration(),
+                },
+                {
+                    Type:               string(placementv1beta1.ResourceBindingOverridden),
+                    Status:             metav1.ConditionTrue,
+                    Reason:             overrideReason,
+                    ObservedGeneration: binding.GetGeneration(),
+                },
+                {
+                    Type:               string(placementv1beta1.ResourceBindingWorkSynchronized),
+                    Status:             metav1.ConditionTrue,
+                    Reason:             condition.AllWorkSyncedReason,
+                    ObservedGeneration: binding.GetGeneration(),
+                },
+                {
+                    Type:               string(placementv1beta1.ResourceBindingApplied),
+                    Status:             metav1.ConditionTrue,
+                    Reason:             condition.AllWorkAppliedReason,
+                    ObservedGeneration: binding.GetGeneration(),
+                },
+                {
+                    Type:               string(placementv1beta1.ResourceBindingAvailable),
+                    Status:             metav1.ConditionTrue,
+                    Reason:             condition.AllWorkAvailableReason,
+                    ObservedGeneration: binding.GetGeneration(),
+                },
+            },
+            DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{
+                {
+                    ResourceIdentifier: placementv1beta1.ResourceIdentifier{
+                        Group:     "",
+                        Version:   "v1",
+                        Kind:      "Service",
+                        Name:      "svc-name",
+                        Namespace: "svc-namespace",
+                    },
+                    ObservationTime:                 metav1.Time{Time: specificTime},
+                    FirstDriftedObservedTime:        metav1.Time{Time: specificTime},
+                    TargetClusterObservedGeneration: 1,
+                    ObservedDrifts: []placementv1beta1.PatchDetail{
+                        {
+                            Path:          "/spec/ports/1/containerPort",
+                            ValueInHub:    "80",
+                            ValueInMember: "90",
+                        },
+                    },
+                },
+            },
+        }
         return cmp.Diff(wantStatus, binding.Status, cmpConditionOption)
     }, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got)", binding.Name))
 }
@@ -2232,6 +2334,67 @@ func markWorkAvailable(work *placementv1beta1.Work) {
     By(fmt.Sprintf("resource work `%s` is marked as available", work.Name))
 }

+func markWorkWithNoDrift(work *placementv1beta1.Work) {
+    work.Status.ManifestConditions = []placementv1beta1.ManifestCondition{
+        {
+            Identifier: placementv1beta1.WorkResourceIdentifier{
+                Ordinal:   0,
+                Group:     "",
+                Version:   "v1",
+                Kind:      "ConfigMap",
+                Name:      "config-name",
+                Namespace: "config-namespace",
+            },
+            Conditions: []metav1.Condition{
+                {
+                    Type:               placementv1beta1.WorkConditionTypeApplied,
+                    Status:             metav1.ConditionTrue,
+                    Reason:             "fakeAppliedManifest",
+                    Message:            "fake apply manifest",
+                    LastTransitionTime: metav1.Now(),
+                },
+                {
+                    Type:               placementv1beta1.WorkConditionTypeAvailable,
+                    Status:             metav1.ConditionTrue,
+                    Reason:             "fakeAvailableManifest",
+                    Message:            "fake available manifest",
+                    LastTransitionTime: metav1.Now(),
+                },
+            },
+            DriftDetails: nil,
+        },
+        {
+            Identifier: placementv1beta1.WorkResourceIdentifier{
+                Ordinal:   1,
+                Group:     "",
+                Version:   "v1",
+                Kind:      "Service",
+                Name:      "svc-name",
+                Namespace: "svc-namespace",
+            },
+            Conditions: []metav1.Condition{
+                {
+                    Type:               placementv1beta1.WorkConditionTypeApplied,
+                    Status:             metav1.ConditionTrue,
+                    Reason:             "fakeAppliedManifest",
+                    Message:            "fake apply manifest",
+                    LastTransitionTime: metav1.Now(),
+                },
+                {
+                    Type:               placementv1beta1.WorkConditionTypeAvailable,
+                    Status:             metav1.ConditionTrue,
+                    Reason:             "fakeAvailableManifest",
+                    Message:            "fake available manifest",
+                    LastTransitionTime: metav1.Now(),
+                },
+            },
+            DriftDetails: nil,
+        },
+    }
+    Expect(k8sClient.Status().Update(ctx, work)).Should(Succeed())
+    By(fmt.Sprintf("resource work `%s` is marked with no drift", work.Name))
+}
+
 // markWorkWithFailedToApplyAndNotAvailable marks the work as not applied with failedPlacement
 func markWorkWithFailedToApplyAndNotAvailable(work *placementv1beta1.Work, hasDiffedDetails, hasDriftedDetails bool) {
     meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{
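Aside: the verify helpers above all follow the same poll-until-the-diff-is-empty pattern. A stripped-down sketch under assumed names (status and fetchStatus are hypothetical stand-ins for the binding status type and the k8sClient.Get call):

package example

import (
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	. "github.com/onsi/gomega"
)

// status is a hypothetical stand-in for the binding status being verified.
type status struct{ Phase string }

// fetchStatus stands in for re-reading the object from the API server.
func fetchStatus() status { return status{Phase: "Available"} }

func TestEventuallyDiffPattern(t *testing.T) {
	g := NewWithT(t)
	want := status{Phase: "Available"}
	// Poll until cmp.Diff returns an empty string; on timeout the last
	// non-empty diff is what the failure message surfaces, which makes
	// mismatches easy to read in CI logs.
	g.Eventually(func() string {
		return cmp.Diff(want, fetchStatus())
	}, 10*time.Second, 250*time.Millisecond).Should(BeEmpty(), "status mismatch (-want +got)")
}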
diff --git a/pkg/controllers/workgenerator/controller_test.go b/pkg/controllers/workgenerator/controller_test.go
index ff5377f6f..5fbcc54c7 100644
--- a/pkg/controllers/workgenerator/controller_test.go
+++ b/pkg/controllers/workgenerator/controller_test.go
@@ -723,10 +723,7 @@ func TestSetBindingStatus(t *testing.T) {
         wantDiffedResourcePlacements []fleetv1beta1.DiffedResourcePlacement
     }{
         "NoWorks": {
-            works:                         map[string]*fleetv1beta1.Work{},
-            wantFailedResourcePlacements:  nil,
-            wantDiffedResourcePlacements:  nil,
-            wantDriftedResourcePlacements: nil,
+            works: map[string]*fleetv1beta1.Work{},
         },
         "both work are available": {
             works: map[string]*fleetv1beta1.Work{
@@ -803,9 +800,6 @@ func TestSetBindingStatus(t *testing.T) {
                     },
                },
            },
-            wantFailedResourcePlacements:  nil,
-            wantDiffedResourcePlacements:  nil,
-            wantDriftedResourcePlacements: nil,
         },
         "One work has one not available and one work has one not applied": {
             works: map[string]*fleetv1beta1.Work{
@@ -946,8 +940,6 @@ func TestSetBindingStatus(t *testing.T) {
                     },
                },
            },
-            wantDiffedResourcePlacements:  nil,
-            wantDriftedResourcePlacements: nil,
         },
         "One work has one not available and one work has one not applied (exceed the maxFailedResourcePlacementLimit)": {
             works: map[string]*fleetv1beta1.Work{
@@ -1076,8 +1068,6 @@ func TestSetBindingStatus(t *testing.T) {
                     },
                },
            },
-            wantDiffedResourcePlacements:  nil,
-            wantDriftedResourcePlacements: nil,
         },
         "One work has one not available and one work all available": {
             works: map[string]*fleetv1beta1.Work{
@@ -1205,8 +1195,6 @@ func TestSetBindingStatus(t *testing.T) {
                     },
                },
            },
-            wantDiffedResourcePlacements:  nil,
-            wantDriftedResourcePlacements: nil,
         },
         "exceed the maxDriftedResourcePlacementLimit": {
             works: map[string]*fleetv1beta1.Work{
@@ -1351,7 +1339,6 @@ func TestSetBindingStatus(t *testing.T) {
                     },
                },
            },
-            wantDiffedResourcePlacements: nil,
         },
         "exceed the maxDiffedResourcePlacementLimit": {
             works: map[string]*fleetv1beta1.Work{
@@ -1470,7 +1457,6 @@ func TestSetBindingStatus(t *testing.T) {
                     },
                },
            },
-            wantDriftedResourcePlacements:   nil,
             maxDiffedResourcePlacementLimit: ptr.To(1),
             wantDiffedResourcePlacements: []fleetv1beta1.DiffedResourcePlacement{
                 {
@@ -1559,18 +1545,7 @@ func TestSetBindingStatus(t *testing.T) {
             gotDrifted := binding.Status.DriftedPlacements
             if maxDriftedResourcePlacementLimit == len(tt.wantDriftedResourcePlacements) {
                 opt := []cmp.Option{
-                    cmpopts.SortSlices(func(s1, s2 string) bool {
-                        return s1 < s2
-                    }),
-                    cmpopts.SortSlices(func(n1, n2 fleetv1beta1.NamespacedName) bool {
-                        if n1.Namespace == n2.Namespace {
-                            return n1.Name < n2.Name
-                        }
-                        return n1.Namespace < n2.Namespace
-                    }),
-                    cmp.Comparer(func(f1, f2 fleetv1beta1.DriftedResourcePlacement) bool {
-                        return f1.ResourceIdentifier.Kind == f2.ResourceIdentifier.Kind
-                    }),
+                    cmpopts.SortSlices(utils.LessFuncDriftedResourcePlacements),
                     cmp.Comparer(func(t1, t2 metav1.Time) bool {
                         if t1.Time.IsZero() || t2.Time.IsZero() {
                             return true // treat them as equal
                         }
@@ -1589,15 +1564,7 @@ func TestSetBindingStatus(t *testing.T) {
             }

             resourceCmpOptions := []cmp.Option{
-                cmpopts.SortSlices(func(i, j fleetv1beta1.DriftedResourcePlacement) bool {
-                    if i.Group < j.Group {
-                        return true
-                    }
-                    if i.Kind < j.Kind {
-                        return true
-                    }
-                    return i.Name < j.Name
-                }),
+                cmpopts.SortSlices(utils.LessFuncDriftedResourcePlacements),
             }
             if diff := cmp.Diff(gotDrifted, tt.wantDriftedResourcePlacements, resourceCmpOptions...); diff != "" {
                 t.Errorf("setBindingStatus got DriftedPlacements mismatch (-got +want):\n%s", diff)
@@ -1606,18 +1573,7 @@ func TestSetBindingStatus(t *testing.T) {
             gotDiffed := binding.Status.DiffedPlacements
             if maxDiffedResourcePlacementLimit == len(tt.wantDiffedResourcePlacements) {
                 opt := []cmp.Option{
-                    cmpopts.SortSlices(func(s1, s2 string) bool {
-                        return s1 < s2
-                    }),
-                    cmpopts.SortSlices(func(n1, n2 fleetv1beta1.NamespacedName) bool {
-                        if n1.Namespace == n2.Namespace {
-                            return n1.Name < n2.Name
-                        }
-                        return n1.Namespace < n2.Namespace
-                    }),
-                    cmpopts.SortSlices(func(f1, f2 fleetv1beta1.DiffedResourcePlacement) bool {
-                        return f1.ResourceIdentifier.Kind < f2.ResourceIdentifier.Kind
-                    }),
+                    cmpopts.SortSlices(utils.LessFuncDiffedResourcePlacements),
                     cmp.Comparer(func(t1, t2 metav1.Time) bool {
                         if t1.Time.IsZero() || t2.Time.IsZero() {
                             return true // treat them as equal
                         }
@@ -1636,15 +1592,7 @@ func TestSetBindingStatus(t *testing.T) {
             }

             resourceCmpOptions = []cmp.Option{
-                cmpopts.SortSlices(func(i, j fleetv1beta1.DiffedResourcePlacement) bool {
-                    if i.Group < j.Group {
-                        return true
-                    }
-                    if i.Kind < j.Kind {
-                        return true
-                    }
-                    return i.Name < j.Name
-                }),
+                cmpopts.SortSlices(utils.LessFuncDiffedResourcePlacements),
             }
             if diff := cmp.Diff(gotDiffed, tt.wantDiffedResourcePlacements, resourceCmpOptions...); diff != "" {
                 t.Errorf("setBindingStatus got DiffedPlacements mismatch (-got +want):\n%s", diff)
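Aside: beyond deduplication, there is a correctness angle to swapping the inline comparators for the shared utils.LessFunc* helpers. The removed comparators fall through to a later field even when an earlier field compares greater, so they are not the strict weak ordering that cmpopts.SortSlices requires for deterministic results. A sketch of the tiered shape the shared helpers use (resourceID is a hypothetical stand-in for the fleet ResourceIdentifier):

package example

// lessByGroupKindName is a deterministic, transitive less function over
// (Group, Kind, Name) tuples; each field is only consulted when all earlier
// fields compare equal. Contrast with the removed inline version, where
// e.g. a=(Group:"b", Kind:"a") and b=(Group:"a", Kind:"b") yields
// less(a,b) == less(b,a) == true, violating antisymmetry.
type resourceID struct {
	Group, Kind, Name string
}

func lessByGroupKindName(a, b resourceID) bool {
	if a.Group != b.Group {
		return a.Group < b.Group
	}
	if a.Kind != b.Kind {
		return a.Kind < b.Kind
	}
	return a.Name < b.Name
}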
diff --git a/pkg/utils/common.go b/pkg/utils/common.go
index 5dee401cc..cb6f80ceb 100644
--- a/pkg/utils/common.go
+++ b/pkg/utils/common.go
@@ -516,6 +516,17 @@ var LessFuncResourceIdentifier = func(a, b placementv1beta1.ResourceIdentifier)
     return aStr < bStr
 }

+// LessFuncPatchDetail is a less function for sorting patch details
+var LessFuncPatchDetail = func(a, b placementv1beta1.PatchDetail) bool {
+    if a.Path != b.Path {
+        return a.Path < b.Path
+    }
+    if a.ValueInMember != b.ValueInMember {
+        return a.ValueInMember < b.ValueInMember
+    }
+    return a.ValueInHub < b.ValueInHub
+}
+
 // LessFuncFailedResourcePlacements is a less function for sorting failed resource placements
 var LessFuncFailedResourcePlacements = func(a, b placementv1beta1.FailedResourcePlacement) bool {
     var aStr, bStr string
@@ -573,32 +584,6 @@ var LessFuncDriftedResourcePlacements = func(a, b placementv1beta1.DriftedResour
     return aStr < bStr
 }

-func IsDriftedResourcePlacementsEqual(oldDriftedResourcePlacements, newDriftedResourcePlacements []placementv1beta1.DriftedResourcePlacement) bool {
-    if len(oldDriftedResourcePlacements) != len(newDriftedResourcePlacements) {
-        return false
-    }
-    sort.Slice(oldDriftedResourcePlacements, func(i, j int) bool {
-        return LessFuncDriftedResourcePlacements(oldDriftedResourcePlacements[i], oldDriftedResourcePlacements[j])
-    })
-    sort.Slice(newDriftedResourcePlacements, func(i, j int) bool {
-        return LessFuncDriftedResourcePlacements(newDriftedResourcePlacements[i], newDriftedResourcePlacements[j])
-    })
-    for i := range oldDriftedResourcePlacements {
-        oldDriftedResourcePlacement := oldDriftedResourcePlacements[i]
-        newDriftedResourcePlacement := newDriftedResourcePlacements[i]
-        if !equality.Semantic.DeepEqual(oldDriftedResourcePlacement.ResourceIdentifier, newDriftedResourcePlacement.ResourceIdentifier) {
-            return false
-        }
-        if &oldDriftedResourcePlacement.TargetClusterObservedGeneration != &newDriftedResourcePlacement.TargetClusterObservedGeneration {
-            return false
-        }
-        if &oldDriftedResourcePlacement.ObservationTime != &newDriftedResourcePlacement.ObservationTime {
-            return false
-        }
-    }
-    return true
-}
-
 // LessFuncDiffedResourcePlacements is a less function for sorting drifted resource placements
 var LessFuncDiffedResourcePlacements = func(a, b placementv1beta1.DiffedResourcePlacement) bool {
     var aStr, bStr string
@@ -616,32 +601,6 @@ var LessFuncDiffedResourcePlacements = func(a, b placementv1beta1.DiffedResource
     return aStr < bStr
 }

-func IsDiffedResourcePlacementsEqual(oldDiffedResourcePlacements, newDiffedResourcePlacements []placementv1beta1.DiffedResourcePlacement) bool {
-    if len(oldDiffedResourcePlacements) != len(newDiffedResourcePlacements) {
-        return false
-    }
-    sort.Slice(oldDiffedResourcePlacements, func(i, j int) bool {
-        return LessFuncDiffedResourcePlacements(oldDiffedResourcePlacements[i], oldDiffedResourcePlacements[j])
-    })
-    sort.Slice(newDiffedResourcePlacements, func(i, j int) bool {
-        return LessFuncDiffedResourcePlacements(newDiffedResourcePlacements[i], newDiffedResourcePlacements[j])
-    })
-    for i := range oldDiffedResourcePlacements {
-        oldDiffedResourcePlacement := oldDiffedResourcePlacements[i]
-        newDiffedResourcePlacement := newDiffedResourcePlacements[i]
-        if !equality.Semantic.DeepEqual(oldDiffedResourcePlacement.ResourceIdentifier, newDiffedResourcePlacement.ResourceIdentifier) {
-            return false
-        }
-        if &oldDiffedResourcePlacement.TargetClusterObservedGeneration != &newDiffedResourcePlacement.TargetClusterObservedGeneration {
-            return false
-        }
-        if &oldDiffedResourcePlacement.ObservationTime != &newDiffedResourcePlacement.ObservationTime {
-            return false
-        }
-    }
-    return true
-}
-
 // IsFleetAnnotationPresent returns true if a key with fleet prefix is present in the annotations map.
 func IsFleetAnnotationPresent(annotations map[string]string) bool {
     for k := range annotations {
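Aside: how the new LessFuncPatchDetail is meant to be consumed. A sketch with a local patchDetail mirror of placementv1beta1.PatchDetail (the real type and helper live in the fleet packages; this stand-in just keeps the example self-contained):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// patchDetail mirrors the shape of placementv1beta1.PatchDetail for this sketch.
type patchDetail struct {
	Path, ValueInHub, ValueInMember string
}

// lessPatchDetail follows the same tiered ordering as the new
// utils.LessFuncPatchDetail: Path first, then ValueInMember, then ValueInHub.
func lessPatchDetail(a, b patchDetail) bool {
	if a.Path != b.Path {
		return a.Path < b.Path
	}
	if a.ValueInMember != b.ValueInMember {
		return a.ValueInMember < b.ValueInMember
	}
	return a.ValueInHub < b.ValueInHub
}

func main() {
	x := []patchDetail{
		{Path: "/spec/replicas", ValueInHub: "3", ValueInMember: "5"},
		{Path: "/spec/ports/1/containerPort", ValueInHub: "80", ValueInMember: "90"},
	}
	y := []patchDetail{
		{Path: "/spec/ports/1/containerPort", ValueInHub: "80", ValueInMember: "90"},
		{Path: "/spec/replicas", ValueInHub: "3", ValueInMember: "5"},
	}
	// Sorting inside cmp makes the comparison order-insensitive, which is
	// how the work-status event filter uses the less functions above.
	fmt.Println(cmp.Equal(x, y, cmpopts.SortSlices(lessPatchDetail))) // true
}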