compare workStatus directly & fix tests
britaniar committed Dec 5, 2024
1 parent 91f0045 commit fdaf6a9
Showing 4 changed files with 216 additions and 156 deletions.
74 changes: 32 additions & 42 deletions pkg/controllers/workgenerator/controller.go
@@ -15,6 +15,8 @@ import (
"strings"
"time"

"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"go.uber.org/atomic"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
@@ -1148,51 +1150,39 @@ func (r *Reconciler) SetupWithManager(mgr controllerruntime.Manager) error {
"Failed to process an update event for work object")
return
}
oldAppliedCondition := meta.FindStatusCondition(oldWork.Status.Conditions, fleetv1beta1.WorkConditionTypeApplied)
newAppliedCondition := meta.FindStatusCondition(newWork.Status.Conditions, fleetv1beta1.WorkConditionTypeApplied)
oldAvailableCondition := meta.FindStatusCondition(oldWork.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable)
newAvailableCondition := meta.FindStatusCondition(newWork.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable)

// we try to filter out events, we only need to handle the updated event if the applied or available condition flip between true and false
// or the failed/diffed/drifted placements are changed.
if condition.EqualCondition(oldAppliedCondition, newAppliedCondition) && condition.EqualCondition(oldAvailableCondition, newAvailableCondition) {
oldDriftedPlacements := extractDriftedResourcePlacementsFromWork(oldWork)
newDriftedPlacements := extractDriftedResourcePlacementsFromWork(newWork)
driftsEqual := utils.IsDriftedResourcePlacementsEqual(oldDriftedPlacements, newDriftedPlacements)
if condition.IsConditionStatusFalse(newAppliedCondition, newWork.Generation) || condition.IsConditionStatusFalse(newAvailableCondition, newWork.Generation) {
diffsEqual := true
if condition.IsConditionStatusFalse(newAppliedCondition, newWork.Generation) {
oldDiffedPlacements := extractDiffedResourcePlacementsFromWork(oldWork)
newDiffedPlacements := extractDiffedResourcePlacementsFromWork(newWork)
diffsEqual = utils.IsDiffedResourcePlacementsEqual(oldDiffedPlacements, newDiffedPlacements)
}
// we need to compare the failed placement if the work is not applied or available
oldFailedPlacements := extractFailedResourcePlacementsFromWork(oldWork)
newFailedPlacements := extractFailedResourcePlacementsFromWork(newWork)
if driftsEqual && diffsEqual && utils.IsFailedResourcePlacementsEqual(oldFailedPlacements, newFailedPlacements) {
klog.V(2).InfoS("The placement lists didn't change on failed work, no need to reconcile", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork))
return
}
} else {
oldResourceSnapshot := oldWork.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel]
newResourceSnapshot := newWork.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel]
if oldResourceSnapshot == "" || newResourceSnapshot == "" {
klog.ErrorS(controller.NewUnexpectedBehaviorError(errors.New("found an invalid work without parent-resource-snapshot-index")),
"Could not find the parent resource snapshot index label", "oldWork", klog.KObj(oldWork), "oldResourceSnapshotLabelValue", oldResourceSnapshot,
"newWork", klog.KObj(newWork), "newResourceSnapshotLabelValue", newResourceSnapshot)
return
}
// There is an edge case that, the work spec is the same but from different resourceSnapshots.
// WorkGenerator will update the work because of the label changes, but the generation is the same.
// When the normal update happens, the controller will set the applied condition as false and wait
// until the work condition has been changed.
// In this edge case, we need to requeue the binding to update the binding status.
if oldResourceSnapshot == newResourceSnapshot && driftsEqual {
klog.V(2).InfoS("The work applied or available condition stayed as true, no need to reconcile", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork))
return
}
lessFuncCondition := func(a, b metav1.Condition) bool {
return a.Type < b.Type
}
workStatusCmpOptions := cmp.Options{
cmpopts.SortSlices(lessFuncCondition),
cmpopts.SortSlices(utils.LessFuncResourceIdentifier),
cmpopts.SortSlices(utils.LessFuncPatchDetail),
utils.IgnoreConditionLTTAndMessageFields,
cmpopts.EquateEmpty(),
}
if diff := cmp.Diff(oldWork.Status, newWork.Status, workStatusCmpOptions); diff != "" {
klog.V(2).InfoS("Work status has been changed", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork))
} else {
oldResourceSnapshot := oldWork.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel]
newResourceSnapshot := newWork.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel]
if oldResourceSnapshot == "" || newResourceSnapshot == "" {
klog.ErrorS(controller.NewUnexpectedBehaviorError(errors.New("found an invalid work without parent-resource-snapshot-index")),
"Could not find the parent resource snapshot index label", "oldWork", klog.KObj(oldWork), "oldResourceSnapshotLabelValue", oldResourceSnapshot,
"newWork", klog.KObj(newWork), "newResourceSnapshotLabelValue", newResourceSnapshot)
return
}
// There is an edge case that, the work spec is the same but from different resourceSnapshots.
// WorkGenerator will update the work because of the label changes, but the generation is the same.
// When the normal update happens, the controller will set the applied condition as false and wait
// until the work condition has been changed.
// In this edge case, we need to requeue the binding to update the binding status.
if oldResourceSnapshot == newResourceSnapshot {
klog.V(2).InfoS("The work applied or available condition stayed as true, no need to reconcile", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork))
return
}
}

// We need to update the binding status in this case
klog.V(2).InfoS("Received a work update event that we need to handle", "work", klog.KObj(newWork), "parentBindingName", parentBindingName)
queue.Add(reconcile.Request{NamespacedName: types.NamespacedName{
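The key change in this file replaces the per-condition checks with a direct go-cmp comparison of the old and new work status. Below is a minimal, standalone sketch of that comparison pattern; it assumes a stand-in workStatus struct and substitutes plain cmpopts.IgnoreFields for the repo's utils.IgnoreConditionLTTAndMessageFields helper, so the names are illustrative rather than the project's actual types.

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// workStatus is a stand-in for the real Work status type; the controller
// compares fleetv1beta1.WorkStatus values in the same way.
type workStatus struct {
	Conditions []metav1.Condition
}

// statusChanged reports whether the two statuses differ in any way the
// controller cares about.
func statusChanged(oldStatus, newStatus workStatus) bool {
	opts := cmp.Options{
		// Ordering of conditions should not count as a change.
		cmpopts.SortSlices(func(a, b metav1.Condition) bool { return a.Type < b.Type }),
		// LastTransitionTime and Message churn without affecting placement state,
		// mirroring the intent of utils.IgnoreConditionLTTAndMessageFields.
		cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "Message"),
		// Treat a nil slice and an empty slice as equal.
		cmpopts.EquateEmpty(),
	}
	return cmp.Diff(oldStatus, newStatus, opts) != ""
}

func main() {
	oldStatus := workStatus{Conditions: []metav1.Condition{
		{Type: "Applied", Status: metav1.ConditionTrue, Reason: "AppliedManifest", Message: "old", LastTransitionTime: metav1.Now()},
	}}
	newStatus := workStatus{Conditions: []metav1.Condition{
		{Type: "Applied", Status: metav1.ConditionTrue, Reason: "AppliedManifest", Message: "new", LastTransitionTime: metav1.Now()},
	}}

	// Only the ignored fields differ, so this update would be filtered out.
	fmt.Println("changed:", statusChanged(oldStatus, newStatus)) // changed: false
}

Filtering at the event-handler level this way keeps the predicate simple: any status difference that survives the sort/ignore options requeues the parent binding, while churn in LastTransitionTime or Message alone does not.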
173 changes: 168 additions & 5 deletions pkg/controllers/workgenerator/controller_integration_test.go
@@ -451,7 +451,7 @@ var _ = Describe("Test Work Generator Controller", func() {
// mark one of the failed manifests as available but no change in the overall condition
markOneManifestAvailable(&work, false)
// check the binding status that it should be applied but not available and with one failed placement
verifyBindStatusNotAvailableWithOnePlacement(binding, false, true)
verifyBindStatusNotAvailableWithOnePlacement(binding, false, true, false)
// mark the work available directly
markWorkAvailable(&work)
// check the binding status that it should be marked as available true eventually
@@ -466,11 +466,12 @@ var _ = Describe("Test Work Generator Controller", func() {
// change one of the failed manifests to be applied and diff manifests to be aligned (applied is now true so no diff placements)
markWorkAsAppliedButNotAvailableWithFailedManifest(&work, false)
// check the binding status that it should be applied but not available and with two failed placement
// placement list should be changed
verifyBindStatusNotAvailableWithTwoPlacements(binding, false, true, false)
// mark one of the failed manifests as available but no change in the overall condition
markOneManifestAvailable(&work, false)
// check the binding status that it should be applied but not available and with one failed placement
verifyBindStatusNotAvailableWithOnePlacement(binding, false, true)
verifyBindStatusNotAvailableWithOnePlacement(binding, false, true, false)
// mark the work available directly
markWorkAvailable(&work)
// check the binding status that it should be marked as available true eventually
@@ -486,10 +487,21 @@ var _ = Describe("Test Work Generator Controller", func() {
markWorkAsAppliedButNotAvailableWithFailedManifest(&work, true)
// check the binding status that it should be applied but not available and with two failed placement and 2 drifted placement
verifyBindStatusNotAvailableWithTwoPlacements(binding, false, true, true)
// mark one of the failed manifests as available and no drift placements but no change in the overall condition
markOneManifestAvailable(&work, true)
// check the binding status that it should be applied but not available and with one failed placement and one drifted placement
// placement list should be changed
verifyBindStatusNotAvailableWithOnePlacement(binding, false, true, true)
// mark the work available directly
markWorkAvailable(&work)
// check the binding status that it should be marked as available true eventually with 2 drift placements
verifyBindStatusAvail(binding, false, true)
// check the binding status that it should be marked as available true eventually with one drift placement
verifyBindStatusAvailableWithOnePlacement(binding, false)
// mark the work with no drift placements
markWorkWithNoDrift(&work)
// check the binding status that it should be marked as available true eventually with no drift placement
// placement list should be changed
verifyBindStatusAvail(binding, false, false)

})

It("Should continue to update the binding status even if the master resource snapshot is deleted after the work is synced", func() {
@@ -2073,7 +2085,7 @@ func verifyBindStatusNotAvailableWithTwoPlacements(binding *placementv1beta1.Clu
}, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got)", binding.Name))
}

func verifyBindStatusNotAvailableWithOnePlacement(binding *placementv1beta1.ClusterResourceBinding, hasOverride, hasFailedPlacement bool) {
func verifyBindStatusNotAvailableWithOnePlacement(binding *placementv1beta1.ClusterResourceBinding, hasOverride, hasFailedPlacement, hasDriftedPlacement bool) {
Eventually(func() string {
Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed())
overrideReason := condition.OverrideNotSpecifiedReason
@@ -2133,6 +2145,96 @@ func verifyBindStatusNotAvailableWithOnePlacement(binding *placementv1beta1.Clus
},
}
}

if hasDriftedPlacement {
wantStatus.DriftedPlacements = []placementv1beta1.DriftedResourcePlacement{
{
ResourceIdentifier: placementv1beta1.ResourceIdentifier{
Group: "",
Version: "v1",
Kind: "Service",
Name: "svc-name",
Namespace: "svc-namespace",
},
ObservationTime: metav1.Time{Time: specificTime},
FirstDriftedObservedTime: metav1.Time{Time: specificTime},
TargetClusterObservedGeneration: 1,
ObservedDrifts: []placementv1beta1.PatchDetail{
{
Path: "/spec/ports/1/containerPort",
ValueInHub: "80",
ValueInMember: "90",
},
},
},
}
}
return cmp.Diff(wantStatus, binding.Status, cmpConditionOption)
}, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got)", binding.Name))
}

func verifyBindStatusAvailableWithOnePlacement(binding *placementv1beta1.ClusterResourceBinding, hasOverride bool) {
Eventually(func() string {
Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed())
overrideReason := condition.OverrideNotSpecifiedReason
if hasOverride {
overrideReason = condition.OverriddenSucceededReason
}
wantStatus := placementv1beta1.ResourceBindingStatus{
Conditions: []metav1.Condition{
{
Type: string(placementv1beta1.ResourceBindingRolloutStarted),
Status: metav1.ConditionTrue,
Reason: condition.RolloutStartedReason,
ObservedGeneration: binding.GetGeneration(),
},
{
Type: string(placementv1beta1.ResourceBindingOverridden),
Status: metav1.ConditionTrue,
Reason: overrideReason,
ObservedGeneration: binding.GetGeneration(),
},
{
Type: string(placementv1beta1.ResourceBindingWorkSynchronized),
Status: metav1.ConditionTrue,
Reason: condition.AllWorkSyncedReason,
ObservedGeneration: binding.GetGeneration(),
},
{
Type: string(placementv1beta1.ResourceBindingApplied),
Status: metav1.ConditionTrue,
Reason: condition.AllWorkAppliedReason,
ObservedGeneration: binding.GetGeneration(),
},
{
Type: string(placementv1beta1.ResourceBindingAvailable),
Status: metav1.ConditionTrue,
Reason: condition.AllWorkAvailableReason,
ObservedGeneration: binding.GetGeneration(),
},
},
DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{
{
ResourceIdentifier: placementv1beta1.ResourceIdentifier{
Group: "",
Version: "v1",
Kind: "Service",
Name: "svc-name",
Namespace: "svc-namespace",
},
ObservationTime: metav1.Time{Time: specificTime},
FirstDriftedObservedTime: metav1.Time{Time: specificTime},
TargetClusterObservedGeneration: 1,
ObservedDrifts: []placementv1beta1.PatchDetail{
{
Path: "/spec/ports/1/containerPort",
ValueInHub: "80",
ValueInMember: "90",
},
},
},
},
}
return cmp.Diff(wantStatus, binding.Status, cmpConditionOption)
}, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got)", binding.Name))
}
@@ -2232,6 +2334,67 @@ func markWorkAvailable(work *placementv1beta1.Work) {
By(fmt.Sprintf("resource work `%s` is marked as available", work.Name))
}

func markWorkWithNoDrift(work *placementv1beta1.Work) {
work.Status.ManifestConditions = []placementv1beta1.ManifestCondition{
{
Identifier: placementv1beta1.WorkResourceIdentifier{
Ordinal: 0,
Group: "",
Version: "v1",
Kind: "ConfigMap",
Name: "config-name",
Namespace: "config-namespace",
},
Conditions: []metav1.Condition{
{
Type: placementv1beta1.WorkConditionTypeApplied,
Status: metav1.ConditionTrue,
Reason: "fakeAppliedManifest",
Message: "fake apply manifest",
LastTransitionTime: metav1.Now(),
},
{
Type: placementv1beta1.WorkConditionTypeAvailable,
Status: metav1.ConditionTrue,
Reason: "fakeAvailableManifest",
Message: "fake available manifest",
LastTransitionTime: metav1.Now(),
},
},
DriftDetails: nil,
},
{
Identifier: placementv1beta1.WorkResourceIdentifier{
Ordinal: 1,
Group: "",
Version: "v1",
Kind: "Service",
Name: "svc-name",
Namespace: "svc-namespace",
},
Conditions: []metav1.Condition{
{
Type: placementv1beta1.WorkConditionTypeApplied,
Status: metav1.ConditionTrue,
Reason: "fakeAppliedManifest",
Message: "fake apply manifest",
LastTransitionTime: metav1.Now(),
},
{
Type: placementv1beta1.WorkConditionTypeAvailable,
Status: metav1.ConditionTrue,
Reason: "fakeAvailableManifest",
Message: "fake available manifest",
LastTransitionTime: metav1.Now(),
},
},
DriftDetails: nil,
},
}
Expect(k8sClient.Status().Update(ctx, work)).Should(Succeed())
By(fmt.Sprintf("resource work `%s` is marked with no drift", work.Name))
}

// markWorkWithFailedToApplyAndNotAvailable marks the work as not applied with failedPlacement
func markWorkWithFailedToApplyAndNotAvailable(work *placementv1beta1.Work, hasDiffedDetails, hasDriftedDetails bool) {
meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{
