
Commit 0e8a28f

Modify the annotation check to happen in CheckVolumeClaimSizes() instead of CheckRackPodTemplate(), to ensure that the rest of the validations still always run. Also, modify the envtests to use more re-usable AsyncAssertions.

burmanm committed Jul 4, 2024
1 parent 0d44a1a commit 0e8a28f
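
The envtest refactor follows one pattern throughout: helpers build a Gomega Eventually(...) chain and return the AsyncAssertion, leaving the terminal Should(...) to each call site. Below is a minimal sketch of that pattern (not part of the commit; the helper name is illustrative, the import paths are assumed, and k8sClient is the suite-level client from the test setup):

package controllers_test

import (
	"context"
	"time"

	. "github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/types"

	cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1"
)

// waitForProgressSketch polls the CassandraDatacenter status until it reaches
// the wanted progress state. It returns the AsyncAssertion unconsumed, so the
// caller chooses the terminal assertion: Should(Succeed()), ShouldNot(...), etc.
func waitForProgressSketch(ctx context.Context, dcKey types.NamespacedName, state cassdcapi.ProgressState) AsyncAssertion {
	return Eventually(func(g Gomega) {
		dc := cassdcapi.CassandraDatacenter{}
		g.Expect(k8sClient.Get(ctx, dcKey, &dc)).To(Succeed())
		g.Expect(dc.Status.CassandraOperatorProgress).To(Equal(state))
	}).WithTimeout(20 * time.Second).WithPolling(200 * time.Millisecond).WithContext(ctx)
}

// Usage at a call site:
//   waitForProgressSketch(ctx, key, cassdcapi.ProgressReady).Should(Succeed())
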
Showing 4 changed files with 115 additions and 93 deletions.
181 changes: 96 additions & 85 deletions internal/controllers/cassandra/cassandradatacenter_controller_test.go
@@ -10,6 +10,7 @@ import (
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -54,7 +55,6 @@ func deleteDatacenter(ctx context.Context, dcName string) {
Expect(k8sClient.Delete(ctx, &dc)).To(Succeed())
}

/*
func createStorageClass(ctx context.Context, storageClassName string) {
sc := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
@@ -65,24 +65,23 @@ func createStorageClass(ctx context.Context, storageClassName string) {
}
Expect(k8sClient.Create(ctx, sc)).To(Succeed())
}
*/

func waitForDatacenterProgress(ctx context.Context, dcName string, state cassdcapi.ProgressState) {
Eventually(func(g Gomega) {
func waitForDatacenterProgress(ctx context.Context, dcName string, state cassdcapi.ProgressState) AsyncAssertion {
return Eventually(func(g Gomega) {
dc := cassdcapi.CassandraDatacenter{}
key := types.NamespacedName{Namespace: testNamespaceName, Name: dcName}

g.Expect(k8sClient.Get(ctx, key, &dc)).To(Succeed())
g.Expect(dc.Status.CassandraOperatorProgress).To(Equal(state))
}).WithTimeout(20 * time.Second).WithPolling(pollingTime).WithContext(ctx).Should(Succeed())
}).WithTimeout(20 * time.Second).WithPolling(pollingTime).WithContext(ctx)
}

func waitForDatacenterReady(ctx context.Context, dcName string) {
waitForDatacenterProgress(ctx, dcName, cassdcapi.ProgressReady)
func waitForDatacenterReady(ctx context.Context, dcName string) AsyncAssertion {
return waitForDatacenterProgress(ctx, dcName, cassdcapi.ProgressReady)
}

func waitForDatacenterCondition(ctx context.Context, dcName string, condition cassdcapi.DatacenterConditionType, status corev1.ConditionStatus) {
Eventually(func(g Gomega) {
func waitForDatacenterCondition(ctx context.Context, dcName string, condition cassdcapi.DatacenterConditionType, status corev1.ConditionStatus) AsyncAssertion {
return Eventually(func(g Gomega) {
dc := cassdcapi.CassandraDatacenter{}
key := types.NamespacedName{Namespace: testNamespaceName, Name: dcName}

@@ -95,7 +94,7 @@ func waitForDatacenterCondition(ctx context.Context, dcName string, condition ca
}
}
g.Expect(false).To(BeTrue(), "Condition not found")
}).WithTimeout(20 * time.Second).WithPolling(pollingTime).WithContext(ctx).Should(Succeed())
}).WithTimeout(20 * time.Second).WithPolling(pollingTime).WithContext(ctx)
}

var _ = Describe("CassandraDatacenter tests", func() {
@@ -115,121 +114,133 @@ var _ = Describe("CassandraDatacenter tests", func() {
Expect(k8sClient.Delete(ctx, testNamespace)).To(Succeed())
})
})

Context("Single rack basic operations", func() {
It("should end up in a Ready state with a single node", func(ctx SpecContext) {
dcName := "dc1"

createDatacenter(ctx, dcName, 1, 1)
waitForDatacenterReady(ctx, dcName)
waitForDatacenterReady(ctx, dcName).Should(Succeed())

verifyStsCount(ctx, dcName, 1, 1)
verifyPodCount(ctx, dcName, 1)
verifyStsCount(ctx, dcName, 1, 1).Should(Succeed())
verifyPodCount(ctx, dcName, 1).Should(Succeed())

waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterReady, corev1.ConditionTrue)
waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterInitialized, corev1.ConditionTrue)
waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterReady, corev1.ConditionTrue).Should(Succeed())
waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterInitialized, corev1.ConditionTrue).Should(Succeed())

deleteDatacenter(ctx, dcName)
verifyDatacenterDeleted(ctx, dcName)
verifyDatacenterDeleted(ctx, dcName).Should(Succeed())
})
It("should end up in a Ready state with multiple nodes", func(ctx SpecContext) {
dcName := "dc1"

createDatacenter(ctx, dcName, 3, 1)

waitForDatacenterReady(ctx, dcName)
waitForDatacenterReady(ctx, dcName).Should(Succeed())

verifyStsCount(ctx, dcName, 1, 3)
verifyPodCount(ctx, dcName, 3)
verifyStsCount(ctx, dcName, 1, 3).Should(Succeed())
verifyPodCount(ctx, dcName, 3).Should(Succeed())

deleteDatacenter(ctx, dcName)
verifyDatacenterDeleted(ctx, dcName)
verifyDatacenterDeleted(ctx, dcName).Should(Succeed())
})
It("should be able to scale up", func(ctx SpecContext) {
dcName := "dc1"

dc := createDatacenter(ctx, dcName, 1, 1)
waitForDatacenterReady(ctx, dcName)
waitForDatacenterReady(ctx, dcName).Should(Succeed())

verifyStsCount(ctx, dcName, 1, 1)
verifyPodCount(ctx, dcName, 1)
verifyStsCount(ctx, dcName, 1, 1).Should(Succeed())
verifyPodCount(ctx, dcName, 1).Should(Succeed())

refreshDatacenter(ctx, &dc)

By("Updating the size to 3")
dc.Spec.Size = 3
Expect(k8sClient.Update(ctx, &dc)).To(Succeed())

waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterScalingUp, corev1.ConditionTrue)
waitForDatacenterProgress(ctx, dcName, cassdcapi.ProgressUpdating)
waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterScalingUp, corev1.ConditionTrue).Should(Succeed())
waitForDatacenterProgress(ctx, dcName, cassdcapi.ProgressUpdating).Should(Succeed())

Eventually(func(g Gomega) {
verifyStsCount(ctx, dcName, 1, 3)
verifyPodCount(ctx, dcName, 3)
})
verifyStsCount(ctx, dcName, 1, 3).Should(Succeed())
verifyPodCount(ctx, dcName, 3).Should(Succeed())

waitForDatacenterReady(ctx, dcName)
waitForDatacenterReady(ctx, dcName).Should(Succeed())

deleteDatacenter(ctx, dcName)
verifyDatacenterDeleted(ctx, dcName)
verifyDatacenterDeleted(ctx, dcName).Should(Succeed())
})
})
Context("There are multiple nodes in multiple racks", func() {
It("should end up in a Ready state", func(ctx SpecContext) {
dcName := "dc2"

createDatacenter(ctx, dcName, 9, 3)
waitForDatacenterReady(ctx, dcName)
waitForDatacenterReady(ctx, dcName).Should(Succeed())

verifyStsCount(ctx, dcName, 3, 3)
verifyPodCount(ctx, dcName, 9)
verifyStsCount(ctx, dcName, 3, 3).Should(Succeed())
verifyPodCount(ctx, dcName, 9).Should(Succeed())

deleteDatacenter(ctx, dcName)
verifyDatacenterDeleted(ctx, dcName)
verifyDatacenterDeleted(ctx, dcName).Should(Succeed())
})
})

// This isn't functional with envtest at the moment, fails with (only in envtest):
/*
// This isn't functional with envtest at the moment
Context("Single datacenter modifications", func() {
It("should be able to expand PVC", func(ctx SpecContext) {
dcName := "dc12"
dc := createDatacenter(ctx, dcName, 1, 1)
waitForDatacenterReady(ctx, dcName)
createStorageClass(ctx, "default")
refreshDatacenter(ctx, &dc)
verifyStsCount(ctx, dcName, 1, 1)
verifyPodCount(ctx, dcName, 1)
By("updating the storageSize to 2Gi")
refreshDatacenter(ctx, &dc)
metav1.SetMetaDataAnnotation(&dc.ObjectMeta, "cassandra.datastax.com/allow-pvc-expansion", "true")
dc.Spec.StorageConfig.CassandraDataVolumeClaimSpec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("2Gi")
Expect(k8sClient.Update(ctx, &dc)).To(Succeed())
waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterResizingVolumes, corev1.ConditionTrue)
waitForDatacenterReady(ctx, dcName)
waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterResizingVolumes, corev1.ConditionFalse)
// Verify the StS was updated
verifyStsCount(ctx, dcName, 1, 1)
stsAll := &appsv1.StatefulSetList{}
Expect(k8sClient.List(ctx, stsAll, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName}, client.InNamespace(testNamespaceName))).To(Succeed())
Expect(len(stsAll.Items)).To(Equal(1))
for _, sts := range stsAll.Items {
claimSize := sts.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests[corev1.ResourceStorage]
Expect("2Gi").To(Equal(claimSize.String()))
}
deleteDatacenter(ctx, dcName)
verifyDatacenterDeleted(ctx, dcName)
})
2024-07-04T17:05:07.636+0300 INFO PersistentVolumeClaim "server-data-cluster1-dc12-r0-sts-0" is invalid: spec: Forbidden: spec is immutable after creation except resources.requests and volumeAttributesClassName for bound claims
core.PersistentVolumeClaimSpec{
AccessModes: {"ReadWriteOnce"},
Selector: nil,
Resources: core.VolumeResourceRequirements{
Limits: nil,
- Requests: core.ResourceList{
- s"storage": {i: resource.int64Amount{value: 1073741824}, s: "1Gi", Format: "BinarySI"},
- },
+ Requests: core.ResourceList{
+ s"storage": {i: resource.int64Amount{value: 2147483648}, s: "2Gi", Format: "BinarySI"},
+ },
},
*/
/*
Context("Single datacenter modifications", func() {
It("should be able to expand PVC", func(ctx SpecContext) {
dcName := "dc12"
dc := createDatacenter(ctx, dcName, 1, 1)
waitForDatacenterReady(ctx, dcName)
createStorageClass(ctx, "default")
verifyStsCount(ctx, dcName, 1, 1)
verifyPodCount(ctx, dcName, 1)
waitForDatacenterReady(ctx, dcName)
By("updating the storageSize to 2Gi")
refreshDatacenter(ctx, &dc)
patch := client.MergeFrom(dc.DeepCopy())
metav1.SetMetaDataAnnotation(&dc.ObjectMeta, "cassandra.datastax.com/allow-storage-changes", "true")
dc.Spec.StorageConfig.CassandraDataVolumeClaimSpec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("2Gi")
Expect(k8sClient.Patch(ctx, &dc, patch)).To(Succeed())
// Expect(k8sClient.Update(ctx, &dc)).To(Succeed())
waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterResizingVolumes, corev1.ConditionTrue).Should(Succeed())
waitForDatacenterReady(ctx, dcName)
waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterResizingVolumes, corev1.ConditionFalse).Should(Succeed())
// Verify the StS was updated
verifyStsCount(ctx, dcName, 1, 1)
stsAll := &appsv1.StatefulSetList{}
Expect(k8sClient.List(ctx, stsAll, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName}, client.InNamespace(testNamespaceName))).To(Succeed())
Expect(len(stsAll.Items)).To(Equal(1))
for _, sts := range stsAll.Items {
claimSize := sts.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests[corev1.ResourceStorage]
Expect("2Gi").To(Equal(claimSize.String()))
}
deleteDatacenter(ctx, dcName)
verifyDatacenterDeleted(ctx, dcName)
})
})
*/
})
})
Expand All @@ -239,8 +250,8 @@ func refreshDatacenter(ctx context.Context, dc *cassdcapi.CassandraDatacenter) {
Expect(k8sClient.Get(ctx, key, dc)).To(Succeed())
}

func verifyStsCount(ctx context.Context, dcName string, rackCount, podsPerSts int) {
Eventually(func(g Gomega) {
func verifyStsCount(ctx context.Context, dcName string, rackCount, podsPerSts int) AsyncAssertion {
return Eventually(func(g Gomega) {
stsAll := &appsv1.StatefulSetList{}
g.Expect(k8sClient.List(ctx, stsAll, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName}, client.InNamespace(testNamespaceName))).To(Succeed())
g.Expect(len(stsAll.Items)).To(Equal(rackCount))
@@ -252,19 +263,19 @@ func verifyStsCount(ctx context.Context, dcName string, rackCount, podsPerSts in
g.Expect(k8sClient.List(ctx, podList, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName, cassdcapi.RackLabel: rackName}, client.InNamespace(testNamespaceName))).To(Succeed())
g.Expect(len(podList.Items)).To(Equal(podsPerSts))
}
}).Should(Succeed())
})
}

func verifyPodCount(ctx context.Context, dcName string, podCount int) {
Eventually(func(g Gomega) {
func verifyPodCount(ctx context.Context, dcName string, podCount int) AsyncAssertion {
return Eventually(func(g Gomega) {
podList := &corev1.PodList{}
g.Expect(k8sClient.List(ctx, podList, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName}, client.InNamespace(testNamespaceName))).To(Succeed())
g.Expect(len(podList.Items)).To(Equal(podCount))
}).Should(Succeed())
})
}

func verifyDatacenterDeleted(ctx context.Context, dcName string) {
Eventually(func(g Gomega) {
func verifyDatacenterDeleted(ctx context.Context, dcName string) AsyncAssertion {
return Eventually(func(g Gomega) {
// Envtest has no garbage collection, so we can only compare that the ownerReferences are correct and they would be GCed (for items which we do not remove)

// Check that DC no longer exists
Expand Down Expand Up @@ -298,7 +309,7 @@ func verifyDatacenterDeleted(ctx context.Context, dcName string) {
g.Expect(pvc.GetDeletionTimestamp()).ToNot(BeNil())
}

}).WithTimeout(10 * time.Second).WithPolling(100 * time.Millisecond).Should(Succeed())
}).WithContext(ctx).WithTimeout(10 * time.Second).WithPolling(100 * time.Millisecond)
}

func verifyOwnerReference(g Gomega, ownerRef metav1.OwnerReference, dcName string) {
2 changes: 2 additions & 0 deletions internal/controllers/cassandra/suite_test.go
@@ -130,6 +130,8 @@ var _ = BeforeSuite(func() {
control.JobRunningRequeue = time.Duration(1 * time.Millisecond)
control.TaskRunningRequeue = time.Duration(1 * time.Millisecond)

createStorageClass(ctx, "default")

go func() {
defer GinkgoRecover()
err = k8sManager.Start(ctx)
12 changes: 6 additions & 6 deletions pkg/reconciliation/reconcile_racks.go
@@ -244,6 +244,9 @@ func (rc *ReconciliationContext) CheckVolumeClaimSizes(statefulSet, desiredSts *

if currentSize.Cmp(createdSize) < 0 {
rc.ReqLogger.Info("PVC resize request detected", "pvc", claim.Name, "currentSize", currentSize.String(), "createdSize", createdSize.String())
if !metav1.HasAnnotation(rc.Datacenter.ObjectMeta, api.AllowStorageChangesAnnotation) || rc.Datacenter.Annotations[api.AllowStorageChangesAnnotation] != "true" {
return result.Error(fmt.Errorf("PVC resize requested, but AllowStorageChangesAnnotation is not set to 'true'"))
}

if !supportsExpansion {
dcPatch := client.MergeFrom(rc.Datacenter.DeepCopy())
@@ -283,8 +286,7 @@ }
}

patch := client.MergeFrom(pvc.DeepCopy())
pvc.Spec.Resources.Requests["storage"] = createdSize
rc.ReqLogger.Info("Patching PVC", "pvc", pvc.Name, "size", createdSize.String())
pvc.Spec.Resources.Requests[corev1.ResourceStorage] = createdSize
if err := rc.Client.Patch(rc.Ctx, pvc, patch); err != nil {
return result.Error(err)
}
@@ -383,10 +385,8 @@ func (rc *ReconciliationContext) CheckRackPodTemplate() result.ReconcileResult {
desiredSts.Annotations = utils.MergeMap(map[string]string{}, statefulSet.Annotations, desiredSts.Annotations)

// copy the stuff that can't be updated
if metav1.HasAnnotation(rc.Datacenter.ObjectMeta, api.AllowStorageChangesAnnotation) && rc.Datacenter.Annotations[api.AllowStorageChangesAnnotation] == "true" {
if res := rc.CheckVolumeClaimSizes(statefulSet, desiredSts); res.Completed() {
return res
}
if res := rc.CheckVolumeClaimSizes(statefulSet, desiredSts); res.Completed() {
return res
}
desiredSts.Spec.VolumeClaimTemplates = statefulSet.Spec.VolumeClaimTemplates

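For readers skimming the reconciler change: the feature-flag check moves from CheckRackPodTemplate() into CheckVolumeClaimSizes() itself, so the size comparison and every validation after it now always run; the annotation only gates the resize action. A condensed sketch of the resulting flow (a simplified stand-in method inside pkg/reconciliation, not the full implementation; only the annotation gate mirrors the diff):

// checkClaimSizesSketch is a simplified stand-in for CheckVolumeClaimSizes.
// It compares each current claim size against the desired one and, on a
// requested grow, enforces the opt-in annotation before doing anything else.
func (rc *ReconciliationContext) checkClaimSizesSketch(current, desired *appsv1.StatefulSet) result.ReconcileResult {
	for i, claim := range current.Spec.VolumeClaimTemplates {
		currentSize := claim.Spec.Resources.Requests[corev1.ResourceStorage]
		desiredSize := desired.Spec.VolumeClaimTemplates[i].Spec.Resources.Requests[corev1.ResourceStorage]
		if currentSize.Cmp(desiredSize) < 0 {
			// The gate moved here from CheckRackPodTemplate: refuse the resize
			// unless the Datacenter explicitly opts in via the annotation.
			if rc.Datacenter.Annotations[api.AllowStorageChangesAnnotation] != "true" {
				return result.Error(fmt.Errorf("PVC resize requested, but AllowStorageChangesAnnotation is not set to 'true'"))
			}
			// ... StorageClass expansion-support check and PVC patching
			// continue here, unchanged from the diff above ...
		}
	}
	return result.Continue()
}
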
13 changes: 11 additions & 2 deletions pkg/reconciliation/reconcile_racks_test.go
@@ -2127,8 +2127,15 @@ func TestCheckVolumeClaimSizesValidation(t *testing.T) {
require.NoError(rc.Client.Update(rc.Ctx, rc.Datacenter))
desiredStatefulSet, err := newStatefulSetForCassandraDatacenter(nil, "default", rc.Datacenter, 2)
require.NoErrorf(err, "error occurred creating statefulset")

res = rc.CheckVolumeClaimSizes(originalStatefulSet, desiredStatefulSet)
require.Equal(result.Error(fmt.Errorf("PVC resize requested, but StorageClass does not support expansion")), res, "We should have an error, shrinking is disabled")
require.Equal(result.Error(fmt.Errorf("PVC resize requested, but AllowStorageChangesAnnotation is not set to 'true'")), res, "We should have an error, feature flag is not set")

metav1.SetMetaDataAnnotation(&rc.Datacenter.ObjectMeta, api.AllowStorageChangesAnnotation, "true")
require.NoError(rc.Client.Update(rc.Ctx, rc.Datacenter))

res = rc.CheckVolumeClaimSizes(originalStatefulSet, desiredStatefulSet)
require.Equal(result.Error(fmt.Errorf("PVC resize requested, but StorageClass does not support expansion")), res, "We should have an error, StorageClass does not allow expansion")
cond, found := rc.Datacenter.GetCondition(api.DatacenterValid)
require.True(found)
require.Equal(corev1.ConditionFalse, cond.Status)
@@ -2213,11 +2220,13 @@ func TestVolumeClaimSizesExpansion(t *testing.T) {
require.NoError(rc.Client.Create(rc.Ctx, pvc))
}

// Mark the StorageClass as allowing expansion
// Mark the StorageClass as allowing expansion and the Datacenter as allowing storage changes
storageClass := &storagev1.StorageClass{}
require.NoError(rc.Client.Get(rc.Ctx, types.NamespacedName{Name: "standard"}, storageClass))
storageClass.AllowVolumeExpansion = ptr.To[bool](true)
require.NoError(rc.Client.Update(rc.Ctx, storageClass))
metav1.SetMetaDataAnnotation(&rc.Datacenter.ObjectMeta, api.AllowStorageChangesAnnotation, "true")
require.NoError(rc.Client.Update(rc.Ctx, rc.Datacenter))

res := rc.CheckVolumeClaimSizes(originalStatefulSet, originalStatefulSet)
require.Equal(result.Continue(), res, "No changes, we should continue")
