From 1c6a9331ec5b2f53d2c7049fa068fc3282c79b71 Mon Sep 17 00:00:00 2001 From: Michael Burman Date: Wed, 22 May 2024 11:11:03 +0300 Subject: [PATCH 1/2] Check if any PVCs are resizing and requeue in that case. Also, add the ability to verify if StorageClass supports resizing Add resize functionality, new DatacenterCondition to indicate Resizing is happening Patch the PVC and reconcile in 10 seconds Fix existing tests Add validation test Implement unit tests and fix the implementation of expansion Let VolumeResize actually just continue instead of waiting for the PVC resize; we will do a requeue and check there before recreating the StatefulSet Add RBAC role for StorageClasses Fix lint issues, update versions of tools Add e2e test for the PVC expansion using TopoLVM Disable StorageConfig webhook validation failure Improve failure logs with a dump of PVCs and PVs as well Move datacenterCondition reset for ResizingVolumes Also log StatefulSets, add annotation to allow StorageConfig changes Add validation if PVC expansion is allowed, modify the behavior to keep the existing PersistentVolumeClaims in the StS, but modify only the sizes Update controller-runtime to 0.17.4 and update some logging Update docker/build-push-action from v4 to v6 Add CHANGELOG, return all the other e2e tests, update Cassandra/DSE versions Modify the annotation check to happen in the CheckVolumeClaimSizes() instead of CheckRackPodTemplate() to ensure the rest of the validations would still always happen. Also, modify the envtests to use more re-usable AsyncAssertions. Add new events to indicate when Datacenter was set to Valid: False --- .github/workflows/kindIntegTest.yml | 107 ++++++- .github/workflows/operatorBuildAndDeploy.yml | 6 +- .github/workflows/release.yml | 10 +- .../workflows/workflow-integration-tests.yaml | 4 +- CHANGELOG.md | 1 + Dockerfile | 2 +- Makefile | 18 +- .../v1beta1/cassandradatacenter_types.go | 28 +- .../v1beta1/cassandradatacenter_webhook.go | 20 +- apis/cassandra/v1beta1/webhook_test.go | 91 +++++- ...dra.datastax.com_cassandradatacenters.yaml | 2 +- .../control.k8ssandra.io_cassandratasks.yaml | 2 +- config/rbac/role.yaml | 8 + go.mod | 38 +-- go.sum | 69 ++--- .../cassandradatacenter_controller.go | 1 + .../cassandradatacenter_controller_test.go | 258 +++++++++++----- internal/controllers/cassandra/suite_test.go | 2 + internal/envtest/statefulset_controller.go | 13 +- pkg/events/events.go | 3 + pkg/reconciliation/decommission_node.go | 1 + pkg/reconciliation/handler.go | 2 + pkg/reconciliation/reconcile_datacenter.go | 61 +++- pkg/reconciliation/reconcile_racks.go | 164 +++++++++- pkg/reconciliation/reconcile_racks_test.go | 292 +++++++++++++++++- pkg/reconciliation/testing.go | 12 +- tests/pvc_expansion/pvc_expansion_test.go | 99 ++++++ ...efault-single-rack-single-node-dc-lvm.yaml | 27 ++ tests/util/kubectl/kubectl.go | 15 +- 29 files changed, 1128 insertions(+), 228 deletions(-) create mode 100644 tests/pvc_expansion/pvc_expansion_test.go create mode 100644 tests/testdata/default-single-rack-single-node-dc-lvm.yaml diff --git a/.github/workflows/kindIntegTest.yml b/.github/workflows/kindIntegTest.yml index 634c13f6..4e5bffc4 100644 --- a/.github/workflows/kindIntegTest.yml +++ b/.github/workflows/kindIntegTest.yml @@ -3,7 +3,6 @@ on: push: branches: - master - - 1.10.x pull_request: branches: [master] jobs: @@ -23,7 +22,7 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Build and push id: docker_build - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: file: 
Dockerfile context: . @@ -34,7 +33,7 @@ jobs: cache-from: type=local,src=/tmp/.buildx-cache cache-to: type=local,dest=/tmp/.buildx-cache - name: Build and push - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: file: logger.Dockerfile push: false @@ -83,7 +82,7 @@ jobs: with: name: k8s-logs-${{ matrix.integration_test }} path: ./build/kubectl_dump - # This job is only for tests that don't run or don't pass against 4.1 yet + # # This job is only for tests that don't run or don't pass against 4.1 yet kind_40_tests: needs: build_docker_images strategy: @@ -118,12 +117,12 @@ jobs: strategy: matrix: version: - - "6.8.46" + - "6.8.49" integration_test: - cdc_successful include: - - version: 6.8.46 - serverImage: datastax/dse-mgmtapi-6_8:6.8.46-ubi8 # DSE 6.8.46 + - version: 6.8.49 + serverImage: datastax/dse-mgmtapi-6_8:6.8.49-ubi8 # DSE 6.8.49 serverType: dse integration_test: "cdc_successful" fail-fast: true @@ -234,8 +233,8 @@ jobs: matrix: version: - "3.11.17" - - "4.0.12" - - "4.1.4" + - "4.0.13" + - "4.1.5" - "6.8.50" - "6.9.0" - "1.0.0" @@ -278,3 +277,93 @@ jobs: with: name: k8s-logs-smoke_test-${{ matrix.version }} path: ./build/kubectl_dump + kind_topolvm_tests: + name: TopoLVM kind installation with volumeExpansion + needs: build_docker_images + strategy: + matrix: + version: + - "4.1.5" + integration_test: + - pvc_expansion + fail-fast: true + runs-on: ubuntu-latest + env: + CGO_ENABLED: 0 + M_INTEG_DIR: ${{ matrix.integration_test }} + M_SERVER_VERSION: ${{ matrix.version }} + steps: + - name: Install necessary tools for LVM setup + run: | + sudo apt-get update + sudo apt-get install -y lvm2 xfsprogs thin-provisioning-tools + - name: Check out code into the Go module directory + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + - name: Check out code into the Go module directory + uses: actions/checkout@v4 + with: + repository: topolvm/topolvm + path: topolvm + ref: topolvm-chart-v15.0.0 + - name: Create LVM from TopoLVM's example setup + run: | + cd topolvm/example + mkdir -p build + mkdir -p bin + make start-lvmd + make KIND=$(type -a -P kind) launch-kind + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version-file: 'go.mod' + cache: true + - name: Install TopoLVM controller + run: | + make cert-manager + helm repo add topolvm https://topolvm.github.io/topolvm + helm repo update + kubectl create namespace topolvm-system + kubectl label namespace topolvm-system topolvm.io/webhook=ignore + kubectl label namespace kube-system topolvm.io/webhook=ignore + helm install --namespace=topolvm-system topolvm topolvm/topolvm -f topolvm/example/values.yaml + kubectl wait --for=condition=available --timeout=120s -n topolvm-system deployments/topolvm-controller + kubectl wait --for=condition=ready --timeout=120s -n topolvm-system -l="app.kubernetes.io/component=controller,app.kubernetes.io/name=topolvm" pod + kubectl wait --for=condition=ready --timeout=120s -n topolvm-system certificate/topolvm-mutatingwebhook + - name: Link tools + shell: bash + run: | + mkdir bin + ln -s /usr/local/bin/kustomize bin/kustomize + - name: Download cass-operator image + uses: actions/download-artifact@v4 + with: + name: cass-operator + path: /tmp + - name: Download system-logger image + uses: actions/download-artifact@v4 + with: + name: system-logger + path: /tmp + - name: Load Docker images + shell: bash + id: load + run: | + echo "operator_img=$(docker load --input /tmp/k8ssandra-cass-operator.tar | cut -f 3 -d' ')" >> $GITHUB_OUTPUT + 
echo "logger_img=$(docker load --input /tmp/k8ssandra-system-logger.tar | cut -f 3 -d' ')" >> $GITHUB_OUTPUT + - name: Load image on the nodes of the cluster + shell: bash + run: | + kind load docker-image --name=topolvm-example ${{ steps.load.outputs.operator_img }} + kind load docker-image --name=topolvm-example ${{ steps.load.outputs.logger_img }} + - name: Run integration test + shell: bash + run: | + IMG=${{ steps.load.outputs.operator_img }} LOG_IMG=${{ steps.load.outputs.logger_img }} make integ-test + - name: Archive k8s logs + # if: ${{ failure() }} + uses: actions/upload-artifact@v4 + with: + name: k8s-logs-topolvm-test-${{ matrix.version }} + path: ./build/kubectl_dump \ No newline at end of file diff --git a/.github/workflows/operatorBuildAndDeploy.yml b/.github/workflows/operatorBuildAndDeploy.yml index cfba03c7..7d554c13 100644 --- a/.github/workflows/operatorBuildAndDeploy.yml +++ b/.github/workflows/operatorBuildAndDeploy.yml @@ -72,7 +72,7 @@ jobs: echo "version=$(make version)" >> $GITHUB_OUTPUT - name: Build and push id: docker_build_cass_operator - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: file: Dockerfile context: . @@ -83,7 +83,7 @@ jobs: cache-to: type=local,dest=/tmp/.buildx-cache - name: Build and push id: docker_build_system_logger - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: file: logger.Dockerfile push: ${{ github.event_name != 'pull_request' }} @@ -99,7 +99,7 @@ jobs: bin/operator-sdk bundle validate ./bundle --select-optional name=good-practices - name: Build and push cass-operator-bundle id: docker_build_cass-operator_bundle - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: file: bundle.Dockerfile build-args: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b01aec65..9c9ff23a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -45,7 +45,7 @@ jobs: echo "TARGET_VERSION=$(echo ${GITHUB_REF#refs/tags/} | awk '{print substr($0,2)}')" >> $GITHUB_ENV - name: Build system-logger id: docker_build_system-logger - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: file: logger.Dockerfile build-args: | @@ -58,7 +58,7 @@ jobs: cache-to: type=local,dest=/tmp/.buildx-cache - name: Build cass-operator id: docker_build_cass-operator - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: file: Dockerfile build-args: | @@ -71,7 +71,7 @@ jobs: cache-to: type=local,dest=/tmp/.buildx-cache - name: Push system-logger id: docker_push_system-logger - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: file: logger.Dockerfile build-args: | @@ -84,7 +84,7 @@ jobs: cache-to: type=local,dest=/tmp/.buildx-cache - name: Push cass-operator id: docker_push_cass-operator - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: file: Dockerfile build-args: | @@ -134,7 +134,7 @@ jobs: - name: Build and push cass-operator-bundle id: docker_build_cass-operator_bundle if: ${{ !env.ACT }} - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: file: bundle.Dockerfile build-args: | diff --git a/.github/workflows/workflow-integration-tests.yaml b/.github/workflows/workflow-integration-tests.yaml index 999d5b41..4f3d3333 100644 --- a/.github/workflows/workflow-integration-tests.yaml +++ b/.github/workflows/workflow-integration-tests.yaml @@ -25,7 +25,7 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Build and push id: 
docker_build - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: file: Dockerfile context: . @@ -36,7 +36,7 @@ jobs: cache-from: type=local,src=/tmp/.buildx-cache cache-to: type=local,dest=/tmp/.buildx-cache - name: Build and push - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: file: logger.Dockerfile push: false diff --git a/CHANGELOG.md b/CHANGELOG.md index 292e4a5f..103b6668 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ Changelog for Cass Operator, new PRs should update the `main / unreleased` secti ## unreleased +* [FEATURE] [#263](https://github.com/k8ssandra/cass-operator/issues/263) Allow increasing the size of CassandraDataVolumeClaimSpec if the selected StorageClass supports it. This feature is currently behind an opt-in feature flag and requires an annotation ``cassandra.datastax.com/allow-storage-changes: true`` to be set in the CassandraDatacenter. * [ENHANCEMENT] [#648](https://github.com/k8ssandra/cass-operator/issues/648) Make MinReadySeconds configurable value in the Spec. * [FEATURE] [#646](https://github.com/k8ssandra/cass-operator/issues/646) Allow starting multiple parallel pods if they have already previously bootstrapped and not planned for replacement. Set annotation ``cassandra.datastax.com/allow-parallel-starts: true`` to enable this feature. diff --git a/Dockerfile b/Dockerfile index 4cadec57..eb8a43d7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM golang:1.20 as builder +FROM golang:1.22 as builder ARG TARGETOS ARG TARGETARCH diff --git a/Makefile b/Makefile index 3bb5726c..ee519c3e 100644 --- a/Makefile +++ b/Makefile @@ -67,7 +67,7 @@ IMG ?= $(IMAGE_TAG_BASE):v$(VERSION) # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) CRD_OPTIONS ?= "crd:generateEmbeddedObjectMeta=true" # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. -ENVTEST_K8S_VERSION = 1.28.x +ENVTEST_K8S_VERSION = 1.30.x # Logger image LOG_IMG_BASE ?= $(ORG)/system-logger @@ -139,9 +139,9 @@ test: manifests generate fmt vet lint envtest ## Run tests. integ-test: kustomize cert-manager helm ## Run integration tests from directory M_INTEG_DIR or set M_INTEG_DIR=all to run all the integration tests. ifeq ($(M_INTEG_DIR), all) # Run all the tests (exclude kustomize & testdata directories) - cd tests && go test -v ./... -timeout 300m --ginkgo.show-node-events --ginkgo.v + cd tests && go test -v ./... -timeout 60m --ginkgo.show-node-events --ginkgo.v else - cd tests/${M_INTEG_DIR} && go test -v ./... -timeout 300m --ginkgo.show-node-events --ginkgo.v + cd tests/${M_INTEG_DIR} && go test -v ./... 
-timeout 60m --ginkgo.show-node-events --ginkgo.v endif .PHONY: version @@ -239,13 +239,13 @@ HELM ?= $(LOCALBIN)/helm OPM ?= $(LOCALBIN)/opm ## Tool Versions -CERT_MANAGER_VERSION ?= v1.14.3 -KUSTOMIZE_VERSION ?= v5.3.0 -CONTROLLER_TOOLS_VERSION ?= v0.14.0 -OPERATOR_SDK_VERSION ?= 1.34.1 +CERT_MANAGER_VERSION ?= v1.14.7 +KUSTOMIZE_VERSION ?= v5.4.2 +CONTROLLER_TOOLS_VERSION ?= v0.15.0 +OPERATOR_SDK_VERSION ?= 1.35.0 HELM_VERSION ?= 3.14.2 -OPM_VERSION ?= 1.36.0 -GOLINT_VERSION ?= 1.55.2 +OPM_VERSION ?= 1.38.0 +GOLINT_VERSION ?= 1.59.1 .PHONY: cert-manager cert-manager: ## Install cert-manager to the cluster diff --git a/apis/cassandra/v1beta1/cassandradatacenter_types.go b/apis/cassandra/v1beta1/cassandradatacenter_types.go index 09eb5de7..5a13b335 100644 --- a/apis/cassandra/v1beta1/cassandradatacenter_types.go +++ b/apis/cassandra/v1beta1/cassandradatacenter_types.go @@ -71,6 +71,9 @@ const ( // AllowParallelStartsAnnotations allows the operator to start multiple server nodes at the same time if they have already bootstrapped. AllowParallelStartsAnnotations = "cassandra.datastax.com/allow-parallel-starts" + // AllowStorageChangesAnnotation indicates the CassandraDatacenter StorageConfig can be modified for existing datacenters + AllowStorageChangesAnnotation = "cassandra.datastax.com/allow-storage-changes" + AllowUpdateAlways AllowUpdateType = "always" AllowUpdateOnce AllowUpdateType = "once" @@ -385,18 +388,19 @@ type CassandraStatusMap map[string]CassandraNodeStatus type DatacenterConditionType string const ( - DatacenterReady DatacenterConditionType = "Ready" - DatacenterInitialized DatacenterConditionType = "Initialized" - DatacenterReplacingNodes DatacenterConditionType = "ReplacingNodes" - DatacenterScalingUp DatacenterConditionType = "ScalingUp" - DatacenterScalingDown DatacenterConditionType = "ScalingDown" - DatacenterUpdating DatacenterConditionType = "Updating" - DatacenterStopped DatacenterConditionType = "Stopped" - DatacenterResuming DatacenterConditionType = "Resuming" - DatacenterRollingRestart DatacenterConditionType = "RollingRestart" - DatacenterValid DatacenterConditionType = "Valid" - DatacenterDecommission DatacenterConditionType = "Decommission" - DatacenterRequiresUpdate DatacenterConditionType = "RequiresUpdate" + DatacenterReady DatacenterConditionType = "Ready" + DatacenterInitialized DatacenterConditionType = "Initialized" + DatacenterReplacingNodes DatacenterConditionType = "ReplacingNodes" + DatacenterScalingUp DatacenterConditionType = "ScalingUp" + DatacenterScalingDown DatacenterConditionType = "ScalingDown" + DatacenterUpdating DatacenterConditionType = "Updating" + DatacenterStopped DatacenterConditionType = "Stopped" + DatacenterResuming DatacenterConditionType = "Resuming" + DatacenterRollingRestart DatacenterConditionType = "RollingRestart" + DatacenterValid DatacenterConditionType = "Valid" + DatacenterDecommission DatacenterConditionType = "Decommission" + DatacenterRequiresUpdate DatacenterConditionType = "RequiresUpdate" + DatacenterResizingVolumes DatacenterConditionType = "ResizingVolumes" // DatacenterHealthy indicates if QUORUM can be reached from all deployed nodes. // If this check fails, certain operations such as scaling up will not proceed. 
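The new DatacenterResizingVolumes condition and the AllowStorageChangesAnnotation above gate the expansion flow this patch adds: per the commit message, the operator verifies that the StorageClass supports resizing before any PVC is patched (which is also why a storageclasses RBAC rule is added further down). The following is a minimal Go sketch of that general pattern, assuming a controller-runtime client; it is not the code this PR adds in pkg/reconciliation (those hunks are not shown here), and the helper name expandPVC is hypothetical.

```go
// Illustrative sketch only; not the implementation added by this PR.
// It shows the general pattern of checking StorageClass.AllowVolumeExpansion
// before growing a bound PVC's storage request. expandPVC is a hypothetical helper.
package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func expandPVC(ctx context.Context, c client.Client, pvc *corev1.PersistentVolumeClaim, newSize resource.Quantity) error {
	if pvc.Spec.StorageClassName == nil || *pvc.Spec.StorageClassName == "" {
		return fmt.Errorf("PVC %s/%s has no StorageClass", pvc.Namespace, pvc.Name)
	}

	// Expansion is only possible when the StorageClass opts in via allowVolumeExpansion.
	sc := &storagev1.StorageClass{}
	if err := c.Get(ctx, types.NamespacedName{Name: *pvc.Spec.StorageClassName}, sc); err != nil {
		return err
	}
	if sc.AllowVolumeExpansion == nil || !*sc.AllowVolumeExpansion {
		return fmt.Errorf("StorageClass %s does not allow volume expansion", sc.Name)
	}

	// Shrinking a bound claim is rejected by the API server, so only ever grow the request.
	current := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
	if newSize.Cmp(current) <= 0 {
		return nil
	}

	// Only spec.resources.requests is mutable on a bound claim, so patch just that field.
	patch := client.MergeFrom(pvc.DeepCopy())
	if pvc.Spec.Resources.Requests == nil {
		pvc.Spec.Resources.Requests = corev1.ResourceList{}
	}
	pvc.Spec.Resources.Requests[corev1.ResourceStorage] = newSize
	return c.Patch(ctx, pvc, patch)
}
```

Kubernetes only accepts growth of spec.resources.requests.storage on a bound claim (the envtest error quoted in the controller test below notes that the rest of a bound PVC's spec is immutable), which is why the webhook change that follows restricts allowed StorageConfig edits to the request size.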
diff --git a/apis/cassandra/v1beta1/cassandradatacenter_webhook.go b/apis/cassandra/v1beta1/cassandradatacenter_webhook.go index 10a619d9..ce7bf31b 100644 --- a/apis/cassandra/v1beta1/cassandradatacenter_webhook.go +++ b/apis/cassandra/v1beta1/cassandradatacenter_webhook.go @@ -20,11 +20,11 @@ import ( "encoding/json" "errors" "fmt" - "reflect" "strings" + "github.com/google/go-cmp/cmp" "github.com/k8ssandra/cass-operator/pkg/images" - + apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" @@ -173,16 +173,18 @@ func ValidateDatacenterFieldChanges(oldDc CassandraDatacenter, newDc CassandraDa return attemptedTo("change serviceAccount") } - // CassandraDataVolumeClaimSpec changes are disallowed + oldClaimSpec := oldDc.Spec.StorageConfig.CassandraDataVolumeClaimSpec.DeepCopy() + newClaimSpec := newDc.Spec.StorageConfig.CassandraDataVolumeClaimSpec.DeepCopy() - if !reflect.DeepEqual(oldDc.Spec.StorageConfig.CassandraDataVolumeClaimSpec, newDc.Spec.StorageConfig.CassandraDataVolumeClaimSpec) { - return attemptedTo("change storageConfig.CassandraDataVolumeClaimSpec") + // CassandraDataVolumeClaimSpec changes are disallowed + if metav1.HasAnnotation(newDc.ObjectMeta, AllowStorageChangesAnnotation) && newDc.Annotations[AllowStorageChangesAnnotation] == "true" { + // If the AllowStorageChangesAnnotation is set, we allow changes to the CassandraDataVolumeClaimSpec sizes, but not other fields + oldClaimSpec.Resources.Requests = newClaimSpec.Resources.Requests } - if oldDc.Spec.StorageConfig.CassandraDataVolumeClaimSpec != nil { - if !reflect.DeepEqual(*oldDc.Spec.StorageConfig.CassandraDataVolumeClaimSpec, *newDc.Spec.StorageConfig.CassandraDataVolumeClaimSpec) { - return attemptedTo("change storageConfig.CassandraDataVolumeClaimSpec") - } + if !apiequality.Semantic.DeepEqual(oldClaimSpec, newClaimSpec) { + pvcSourceDiff := cmp.Diff(oldClaimSpec, newClaimSpec) + return attemptedTo("change storageConfig.CassandraDataVolumeClaimSpec, diff: %s", pvcSourceDiff) } // Topology changes - Racks diff --git a/apis/cassandra/v1beta1/webhook_test.go b/apis/cassandra/v1beta1/webhook_test.go index dda14ef5..61f40223 100644 --- a/apis/cassandra/v1beta1/webhook_test.go +++ b/apis/cassandra/v1beta1/webhook_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/ptr" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -398,7 +399,7 @@ func Test_ValidateSingleDatacenter(t *testing.T) { func Test_ValidateDatacenterFieldChanges(t *testing.T) { storageSize := resource.MustParse("1Gi") - storageName := "server-data" + storageName := ptr.To[string]("server-data") tests := []struct { name string @@ -419,7 +420,7 @@ func Test_ValidateDatacenterFieldChanges(t *testing.T) { DeprecatedServiceAccount: "admin", StorageConfig: StorageConfig{ CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: &storageName, + StorageClassName: storageName, AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{"storage": storageSize}, @@ -446,7 +447,7 @@ func Test_ValidateDatacenterFieldChanges(t *testing.T) { DeprecatedServiceAccount: "admin", StorageConfig: StorageConfig{ CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: &storageName, + StorageClassName: 
storageName, AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{"storage": storageSize}, @@ -573,7 +574,7 @@ func Test_ValidateDatacenterFieldChanges(t *testing.T) { Spec: CassandraDatacenterSpec{ StorageConfig: StorageConfig{ CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: &storageName, + StorageClassName: storageName, AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{"storage": storageSize}, @@ -589,7 +590,7 @@ func Test_ValidateDatacenterFieldChanges(t *testing.T) { Spec: CassandraDatacenterSpec{ StorageConfig: StorageConfig{ CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: &storageName, + StorageClassName: storageName, AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteMany"}, Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{"storage": storageSize}, @@ -600,6 +601,84 @@ func Test_ValidateDatacenterFieldChanges(t *testing.T) { }, errString: "change storageConfig.CassandraDataVolumeClaimSpec", }, + { + name: "StorageClassName changes with storageConfig changes allowed", + oldDc: &CassandraDatacenter{ + ObjectMeta: metav1.ObjectMeta{ + Name: "exampleDC", + }, + Spec: CassandraDatacenterSpec{ + StorageConfig: StorageConfig{ + CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: storageName, + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{"storage": storageSize}, + }, + }, + }, + }, + }, + newDc: &CassandraDatacenter{ + ObjectMeta: metav1.ObjectMeta{ + Name: "exampleDC", + Annotations: map[string]string{ + AllowStorageChangesAnnotation: "true", + }, + }, + Spec: CassandraDatacenterSpec{ + StorageConfig: StorageConfig{ + CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: ptr.To[string]("new-server-data"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{"storage": storageSize}, + }, + }, + }, + }, + }, + errString: "change storageConfig.CassandraDataVolumeClaimSpec", + }, + { + name: "storage requests size changes with storageConfig changes allowed", + oldDc: &CassandraDatacenter{ + ObjectMeta: metav1.ObjectMeta{ + Name: "exampleDC", + }, + Spec: CassandraDatacenterSpec{ + StorageConfig: StorageConfig{ + CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: storageName, + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{"storage": storageSize}, + }, + }, + }, + }, + }, + newDc: &CassandraDatacenter{ + ObjectMeta: metav1.ObjectMeta{ + Name: "exampleDC", + Annotations: map[string]string{ + AllowStorageChangesAnnotation: "true", + }, + }, + Spec: CassandraDatacenterSpec{ + StorageConfig: StorageConfig{ + CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: storageName, + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{"storage": 
resource.MustParse("2Gi")}, + }, + }, + }, + }, + }, + errString: "", + }, { name: "Removing a rack", oldDc: &CassandraDatacenter{ @@ -836,7 +915,7 @@ func Test_ValidateDatacenterFieldChanges(t *testing.T) { } else { if tt.errString == "" { t.Errorf("ValidateDatacenterFieldChanges() err = %v, should be valid", err) - } else if !strings.HasSuffix(err.Error(), tt.errString) { + } else if !strings.Contains(err.Error(), tt.errString) { t.Errorf("ValidateDatacenterFieldChanges() err = %v, want suffix %v", err, tt.errString) } } diff --git a/config/crd/bases/cassandra.datastax.com_cassandradatacenters.yaml b/config/crd/bases/cassandra.datastax.com_cassandradatacenters.yaml index 74558566..9cda1de7 100644 --- a/config/crd/bases/cassandra.datastax.com_cassandradatacenters.yaml +++ b/config/crd/bases/cassandra.datastax.com_cassandradatacenters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.15.0 name: cassandradatacenters.cassandra.datastax.com spec: group: cassandra.datastax.com diff --git a/config/crd/bases/control.k8ssandra.io_cassandratasks.yaml b/config/crd/bases/control.k8ssandra.io_cassandratasks.yaml index d1c79bbf..a0dc841c 100644 --- a/config/crd/bases/control.k8ssandra.io_cassandratasks.yaml +++ b/config/crd/bases/control.k8ssandra.io_cassandratasks.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.15.0 name: cassandratasks.control.k8ssandra.io spec: group: control.k8ssandra.io diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 7706c3a5..ffd0d00e 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -13,6 +13,14 @@ rules: - get - list - watch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/go.mod b/go.mod index 40d3ec57..33ba1003 100644 --- a/go.mod +++ b/go.mod @@ -1,30 +1,31 @@ module github.com/k8ssandra/cass-operator -go 1.21 +go 1.22 require ( github.com/davecgh/go-spew v1.1.1 github.com/go-logr/logr v1.4.1 github.com/google/uuid v1.3.0 - github.com/onsi/gomega v1.30.0 + github.com/onsi/gomega v1.33.1 github.com/pavel-v-chernykh/keystore-go v2.1.0+incompatible github.com/pkg/errors v0.9.1 github.com/stretchr/testify v1.8.4 golang.org/x/oauth2 v0.12.0 // indirect - golang.org/x/term v0.15.0 + golang.org/x/term v0.20.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.29.0 - k8s.io/apimachinery v0.29.0 - k8s.io/client-go v0.29.0 - sigs.k8s.io/controller-runtime v0.17.2 + k8s.io/api v0.29.2 + k8s.io/apimachinery v0.29.2 + k8s.io/client-go v0.29.2 + sigs.k8s.io/controller-runtime v0.17.4 ) require ( github.com/Jeffail/gabs/v2 v2.7.0 - github.com/onsi/ginkgo/v2 v2.14.0 + github.com/google/go-cmp v0.6.0 + github.com/onsi/ginkgo/v2 v2.19.0 github.com/prometheus/client_golang v1.18.0 go.uber.org/zap v1.26.0 - golang.org/x/mod v0.14.0 + golang.org/x/mod v0.17.0 k8s.io/utils v0.0.0-20230726121419-3b25d923346b ) @@ -39,14 +40,13 @@ require ( github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf 
v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20230502171905-255e3b9b56de // indirect + github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect github.com/imdario/mergo v0.3.15 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -63,18 +63,18 @@ require ( github.com/stretchr/objx v0.5.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/sys v0.16.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.16.1 // indirect + golang.org/x/tools v0.21.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.29.0 // indirect - k8s.io/component-base v0.29.0 // indirect + k8s.io/apiextensions-apiserver v0.29.2 // indirect + k8s.io/component-base v0.29.2 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect diff --git a/go.sum b/go.sum index 60b4a3ae..0eab4311 100644 --- a/go.sum +++ b/go.sum @@ -27,8 +27,8 @@ github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2Kv github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -46,8 +46,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20230502171905-255e3b9b56de h1:6bMcLOeKoNo0+mTOb1ee3McF6CCKGixjLR3EDQY1Jik= -github.com/google/pprof v0.0.0-20230502171905-255e3b9b56de/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 
h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= @@ -76,10 +76,10 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= -github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/pavel-v-chernykh/keystore-go v2.1.0+incompatible h1:Jd6xfriVlJ6hWPvYOE0Ni0QWcNTLRehfGPFxr3eSL80= github.com/pavel-v-chernykh/keystore-go v2.1.0+incompatible/go.mod h1:xlUlxe/2ItGlQyMTstqeDv9r3U4obH7xYd26TbDQutY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -103,7 +103,6 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -124,15 +123,15 @@ golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfU golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -141,23 +140,23 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= +golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -168,8 +167,8 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -181,24 +180,24 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= -k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= -k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= -k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= -k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= -k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= -k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= -k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= -k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= -k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= +k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= +k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= +k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= +k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b 
h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0= -sigs.k8s.io/controller-runtime v0.17.2/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= +sigs.k8s.io/controller-runtime v0.17.4 h1:AMf1E0+93/jLQ13fb76S6Atwqp24EQFCmNbG84GJxew= +sigs.k8s.io/controller-runtime v0.17.4/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/internal/controllers/cassandra/cassandradatacenter_controller.go b/internal/controllers/cassandra/cassandradatacenter_controller.go index 0f859132..7bd6bbcb 100644 --- a/internal/controllers/cassandra/cassandradatacenter_controller.go +++ b/internal/controllers/cassandra/cassandradatacenter_controller.go @@ -61,6 +61,7 @@ var ( // +kubebuilder:rbac:groups=core,namespace=cass-operator,resources=pods;endpoints;services;configmaps;secrets;persistentvolumeclaims;events,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=core,namespace=cass-operator,resources=namespaces,verbs=get // +kubebuilder:rbac:groups=core,resources=persistentvolumes;nodes,verbs=get;list;watch +// +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch // +kubebuilder:rbac:groups=policy,namespace=cass-operator,resources=poddisruptionbudgets,verbs=get;list;watch;create;update;patch;delete // CassandraDatacenterReconciler reconciles a cassandraDatacenter object diff --git a/internal/controllers/cassandra/cassandradatacenter_controller_test.go b/internal/controllers/cassandra/cassandradatacenter_controller_test.go index e3c52e2c..b0a089dd 100644 --- a/internal/controllers/cassandra/cassandradatacenter_controller_test.go +++ b/internal/controllers/cassandra/cassandradatacenter_controller_test.go @@ -10,6 +10,7 @@ import ( . 
"github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -20,6 +21,10 @@ import ( cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1" ) +const ( + pollingTime = 50 * time.Millisecond +) + var ( testNamespaceName string ) @@ -50,116 +55,203 @@ func deleteDatacenter(ctx context.Context, dcName string) { Expect(k8sClient.Delete(ctx, &dc)).To(Succeed()) } -func waitForDatacenterProgress(ctx context.Context, dcName string, state cassdcapi.ProgressState) { - Eventually(func(g Gomega) { +func createStorageClass(ctx context.Context, storageClassName string) { + sc := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: storageClassName, + }, + AllowVolumeExpansion: ptr.To[bool](true), + Provisioner: "kubernetes.io/no-provisioner", + } + Expect(k8sClient.Create(ctx, sc)).To(Succeed()) +} + +func waitForDatacenterProgress(ctx context.Context, dcName string, state cassdcapi.ProgressState) AsyncAssertion { + return Eventually(func(g Gomega) { dc := cassdcapi.CassandraDatacenter{} key := types.NamespacedName{Namespace: testNamespaceName, Name: dcName} g.Expect(k8sClient.Get(ctx, key, &dc)).To(Succeed()) g.Expect(dc.Status.CassandraOperatorProgress).To(Equal(state)) - }).WithTimeout(20 * time.Second).WithPolling(200 * time.Millisecond).WithContext(ctx).Should(Succeed()) + }).WithTimeout(20 * time.Second).WithPolling(pollingTime).WithContext(ctx) +} + +func waitForDatacenterReady(ctx context.Context, dcName string) AsyncAssertion { + return waitForDatacenterProgress(ctx, dcName, cassdcapi.ProgressReady) } -func waitForDatacenterReady(ctx context.Context, dcName string) { - waitForDatacenterProgress(ctx, dcName, cassdcapi.ProgressReady) +func waitForDatacenterCondition(ctx context.Context, dcName string, condition cassdcapi.DatacenterConditionType, status corev1.ConditionStatus) AsyncAssertion { + return Eventually(func(g Gomega) { + dc := cassdcapi.CassandraDatacenter{} + key := types.NamespacedName{Namespace: testNamespaceName, Name: dcName} + + g.Expect(k8sClient.Get(ctx, key, &dc)).To(Succeed()) + g.Expect(dc.Status.Conditions).ToNot(BeNil()) + for _, cond := range dc.Status.Conditions { + if cond.Type == condition { + g.Expect(cond.Status).To(Equal(status)) + return + } + } + g.Expect(false).To(BeTrue(), "Condition not found") + }).WithTimeout(20 * time.Second).WithPolling(pollingTime).WithContext(ctx) } var _ = Describe("CassandraDatacenter tests", func() { Describe("Creating a new datacenter", func() { - Context("Single datacenter", func() { - BeforeEach(func() { - testNamespaceName = fmt.Sprintf("test-cassdc-%d", rand.Int31()) - testNamespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: testNamespaceName, - }, - } - Expect(k8sClient.Create(ctx, testNamespace)).Should(Succeed()) + BeforeEach(func() { + testNamespaceName = fmt.Sprintf("test-cassdc-%d", rand.Int31()) + testNamespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespaceName, + }, + } + Expect(k8sClient.Create(ctx, testNamespace)).Should(Succeed()) + DeferCleanup(func() { + // Note that envtest doesn't actually delete any namespace as it doesn't have kube-controller-manager running + // but it does it mark it as "terminating", so modifying and adding new resources will cause an error in the test + // https://book.kubebuilder.io/reference/envtest.html#namespace-usage-limitation + 
Expect(k8sClient.Delete(ctx, testNamespace)).To(Succeed()) }) + }) + Context("Single rack basic operations", func() { + It("should end up in a Ready state with a single node", func(ctx SpecContext) { + dcName := "dc1" - AfterEach(func() { - testNamespaceDel := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: testNamespaceName, - }, - } - Expect(k8sClient.Delete(ctx, testNamespaceDel)).To(Succeed()) + createDatacenter(ctx, dcName, 1, 1) + waitForDatacenterReady(ctx, dcName).Should(Succeed()) + + verifyStsCount(ctx, dcName, 1, 1).Should(Succeed()) + verifyPodCount(ctx, dcName, 1).Should(Succeed()) + + waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterReady, corev1.ConditionTrue).Should(Succeed()) + waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterInitialized, corev1.ConditionTrue).Should(Succeed()) + + deleteDatacenter(ctx, dcName) + verifyDatacenterDeleted(ctx, dcName).Should(Succeed()) }) - When("There is a single rack and a single node", func() { - It("should end up in a Ready state", func(ctx SpecContext) { - dcName := "dc1" + It("should end up in a Ready state with multiple nodes", func(ctx SpecContext) { + dcName := "dc1" - createDatacenter(ctx, dcName, 1, 1) - waitForDatacenterReady(ctx, dcName) + createDatacenter(ctx, dcName, 3, 1) - verifyStsCount(ctx, dcName, 1, 1) - verifyPodCount(ctx, dcName, 1) + waitForDatacenterReady(ctx, dcName).Should(Succeed()) - deleteDatacenter(ctx, dcName) - verifyDatacenterDeleted(ctx, dcName) - }) - It("should be able to scale up", func(ctx SpecContext) { - dcName := "dc11" + verifyStsCount(ctx, dcName, 1, 3).Should(Succeed()) + verifyPodCount(ctx, dcName, 3).Should(Succeed()) - dc := createDatacenter(ctx, dcName, 1, 1) - waitForDatacenterReady(ctx, dcName) + deleteDatacenter(ctx, dcName) + verifyDatacenterDeleted(ctx, dcName).Should(Succeed()) + }) + It("should be able to scale up", func(ctx SpecContext) { + dcName := "dc1" - verifyStsCount(ctx, dcName, 1, 1) - verifyPodCount(ctx, dcName, 1) + dc := createDatacenter(ctx, dcName, 1, 1) + waitForDatacenterReady(ctx, dcName).Should(Succeed()) - key := types.NamespacedName{Namespace: testNamespaceName, Name: dcName} - Expect(k8sClient.Get(ctx, key, &dc)).To(Succeed()) + verifyStsCount(ctx, dcName, 1, 1).Should(Succeed()) + verifyPodCount(ctx, dcName, 1).Should(Succeed()) - By("Updating the size to 3") - dc.Spec.Size = 3 - Expect(k8sClient.Update(ctx, &dc)).To(Succeed()) + refreshDatacenter(ctx, &dc) - Eventually(func(g Gomega) { - verifyStsCount(ctx, dcName, 1, 3) - verifyPodCount(ctx, dcName, 3) - }) + By("Updating the size to 3") + dc.Spec.Size = 3 + Expect(k8sClient.Update(ctx, &dc)).To(Succeed()) - waitForDatacenterReady(ctx, dcName) + waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterScalingUp, corev1.ConditionTrue).Should(Succeed()) + waitForDatacenterProgress(ctx, dcName, cassdcapi.ProgressUpdating).Should(Succeed()) - deleteDatacenter(ctx, dcName) - verifyDatacenterDeleted(ctx, dcName) - }) + verifyStsCount(ctx, dcName, 1, 3).Should(Succeed()) + verifyPodCount(ctx, dcName, 3).Should(Succeed()) + + waitForDatacenterReady(ctx, dcName).Should(Succeed()) + + deleteDatacenter(ctx, dcName) + verifyDatacenterDeleted(ctx, dcName).Should(Succeed()) }) - When("There are multiple nodes in a single rack", func() { - It("should end up in a Ready state", func(ctx SpecContext) { - dcName := "dc2" + }) + Context("There are multiple nodes in multiple racks", func() { + It("should end up in a Ready state", func(ctx SpecContext) { + dcName := "dc2" - createDatacenter(ctx, 
dcName, 3, 1) + createDatacenter(ctx, dcName, 9, 3) + waitForDatacenterReady(ctx, dcName).Should(Succeed()) + verifyStsCount(ctx, dcName, 3, 3).Should(Succeed()) + verifyPodCount(ctx, dcName, 9).Should(Succeed()) + + deleteDatacenter(ctx, dcName) + verifyDatacenterDeleted(ctx, dcName).Should(Succeed()) + }) + }) + // This isn't functional with envtest at the moment, fails with (only in envtest): + /* + 2024-07-04T17:05:07.636+0300 INFO PersistentVolumeClaim "server-data-cluster1-dc12-r0-sts-0" is invalid: spec: Forbidden: spec is immutable after creation except resources.requests and volumeAttributesClassName for bound claims + core.PersistentVolumeClaimSpec{ + AccessModes: {"ReadWriteOnce"}, + Selector: nil, + Resources: core.VolumeResourceRequirements{ + Limits: nil, + - Requests: core.ResourceList{ + - s"storage": {i: resource.int64Amount{value: 1073741824}, s: "1Gi", Format: "BinarySI"}, + - }, + + Requests: core.ResourceList{ + + s"storage": {i: resource.int64Amount{value: 2147483648}, s: "2Gi", Format: "BinarySI"}, + + }, + }, + */ + /* + Context("Single datacenter modifications", func() { + It("should be able to expand PVC", func(ctx SpecContext) { + dcName := "dc12" + + dc := createDatacenter(ctx, dcName, 1, 1) waitForDatacenterReady(ctx, dcName) + createStorageClass(ctx, "default") - verifyStsCount(ctx, dcName, 1, 3) - verifyPodCount(ctx, dcName, 3) + verifyStsCount(ctx, dcName, 1, 1) + verifyPodCount(ctx, dcName, 1) + waitForDatacenterReady(ctx, dcName) - deleteDatacenter(ctx, dcName) - verifyDatacenterDeleted(ctx, dcName) - }) - }) - When("There are multiple nodes in multiple racks", func() { - It("should end up in a Ready state", func(ctx SpecContext) { - dcName := "dc3" + By("updating the storageSize to 2Gi") + refreshDatacenter(ctx, &dc) + patch := client.MergeFrom(dc.DeepCopy()) + metav1.SetMetaDataAnnotation(&dc.ObjectMeta, "cassandra.datastax.com/allow-storage-changes", "true") + dc.Spec.StorageConfig.CassandraDataVolumeClaimSpec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("2Gi") + Expect(k8sClient.Patch(ctx, &dc, patch)).To(Succeed()) + // Expect(k8sClient.Update(ctx, &dc)).To(Succeed()) - createDatacenter(ctx, dcName, 9, 3) + waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterResizingVolumes, corev1.ConditionTrue).Should(Succeed()) waitForDatacenterReady(ctx, dcName) + waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterResizingVolumes, corev1.ConditionFalse).Should(Succeed()) + + // Verify the StS was updated + verifyStsCount(ctx, dcName, 1, 1) + stsAll := &appsv1.StatefulSetList{} - verifyStsCount(ctx, dcName, 3, 3) - verifyPodCount(ctx, dcName, 9) + Expect(k8sClient.List(ctx, stsAll, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName}, client.InNamespace(testNamespaceName))).To(Succeed()) + Expect(len(stsAll.Items)).To(Equal(1)) + + for _, sts := range stsAll.Items { + claimSize := sts.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests[corev1.ResourceStorage] + Expect("2Gi").To(Equal(claimSize.String())) + } deleteDatacenter(ctx, dcName) verifyDatacenterDeleted(ctx, dcName) }) }) - }) + */ }) }) -func verifyStsCount(ctx context.Context, dcName string, rackCount, podsPerSts int) { - Eventually(func(g Gomega) { +func refreshDatacenter(ctx context.Context, dc *cassdcapi.CassandraDatacenter) { + key := types.NamespacedName{Namespace: testNamespaceName, Name: dc.Name} + Expect(k8sClient.Get(ctx, key, dc)).To(Succeed()) +} + +func verifyStsCount(ctx context.Context, dcName string, rackCount, podsPerSts int) AsyncAssertion { + 
return Eventually(func(g Gomega) { stsAll := &appsv1.StatefulSetList{} g.Expect(k8sClient.List(ctx, stsAll, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName}, client.InNamespace(testNamespaceName))).To(Succeed()) g.Expect(len(stsAll.Items)).To(Equal(rackCount)) @@ -171,19 +263,19 @@ func verifyStsCount(ctx context.Context, dcName string, rackCount, podsPerSts in g.Expect(k8sClient.List(ctx, podList, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName, cassdcapi.RackLabel: rackName}, client.InNamespace(testNamespaceName))).To(Succeed()) g.Expect(len(podList.Items)).To(Equal(podsPerSts)) } - }).Should(Succeed()) + }) } -func verifyPodCount(ctx context.Context, dcName string, podCount int) { - Eventually(func(g Gomega) { +func verifyPodCount(ctx context.Context, dcName string, podCount int) AsyncAssertion { + return Eventually(func(g Gomega) { podList := &corev1.PodList{} g.Expect(k8sClient.List(ctx, podList, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName}, client.InNamespace(testNamespaceName))).To(Succeed()) g.Expect(len(podList.Items)).To(Equal(podCount)) - }).Should(Succeed()) + }) } -func verifyDatacenterDeleted(ctx context.Context, dcName string) { - Eventually(func(g Gomega) { +func verifyDatacenterDeleted(ctx context.Context, dcName string) AsyncAssertion { + return Eventually(func(g Gomega) { // Envtest has no garbage collection, so we can only compare that the ownerReferences are correct and they would be GCed (for items which we do not remove) // Check that DC no longer exists @@ -192,20 +284,22 @@ func verifyDatacenterDeleted(ctx context.Context, dcName string) { err := k8sClient.Get(ctx, dcKey, dc) g.Expect(errors.IsNotFound(err)).To(BeTrue()) - // Check that services would be autodeleted + // Check that services would be autodeleted and then remove them svcList := &corev1.ServiceList{} g.Expect(k8sClient.List(ctx, svcList, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName}, client.InNamespace(testNamespaceName))).To(Succeed()) for _, svc := range svcList.Items { g.Expect(len(svc.OwnerReferences)).To(Equal(1)) verifyOwnerReference(g, svc.OwnerReferences[0], dcName) + g.Expect(k8sClient.Delete(ctx, &svc)).To(Succeed()) } - // Check that all StS would be autoremoved + // Check that all StS would be autoremoved and remove them stsAll := &appsv1.StatefulSetList{} g.Expect(k8sClient.List(ctx, stsAll, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName}, client.InNamespace(testNamespaceName))).To(Succeed()) for _, sts := range stsAll.Items { g.Expect(len(sts.OwnerReferences)).To(Equal(1)) verifyOwnerReference(g, sts.OwnerReferences[0], dcName) + g.Expect(k8sClient.Delete(ctx, &sts)).To(Succeed()) } // Check that all PVCs were removed (we remove these) @@ -215,7 +309,7 @@ func verifyDatacenterDeleted(ctx context.Context, dcName string) { g.Expect(pvc.GetDeletionTimestamp()).ToNot(BeNil()) } - }).WithTimeout(10 * time.Second).WithPolling(100 * time.Millisecond).Should(Succeed()) + }).WithContext(ctx).WithTimeout(10 * time.Second).WithPolling(100 * time.Millisecond) } func verifyOwnerReference(g Gomega, ownerRef metav1.OwnerReference, dcName string) { @@ -227,9 +321,11 @@ func verifyOwnerReference(g Gomega, ownerRef metav1.OwnerReference, dcName strin func createStubCassDc(dcName string, nodeCount int32) cassdcapi.CassandraDatacenter { return cassdcapi.CassandraDatacenter{ ObjectMeta: metav1.ObjectMeta{ - Name: dcName, - Namespace: testNamespaceName, - Annotations: map[string]string{}, + Name: dcName, + Namespace: testNamespaceName, + Annotations: 
map[string]string{ + cassdcapi.UpdateAllowedAnnotation: "true", + }, }, Spec: cassdcapi.CassandraDatacenterSpec{ ManagementApiAuth: cassdcapi.ManagementApiAuthConfig{ diff --git a/internal/controllers/cassandra/suite_test.go b/internal/controllers/cassandra/suite_test.go index c0d3b2a2..711464e8 100644 --- a/internal/controllers/cassandra/suite_test.go +++ b/internal/controllers/cassandra/suite_test.go @@ -130,6 +130,8 @@ var _ = BeforeSuite(func() { control.JobRunningRequeue = time.Duration(1 * time.Millisecond) control.TaskRunningRequeue = time.Duration(1 * time.Millisecond) + createStorageClass(ctx, "default") + go func() { defer GinkgoRecover() err = k8sManager.Start(ctx) diff --git a/internal/envtest/statefulset_controller.go b/internal/envtest/statefulset_controller.go index a385a884..af6977b2 100644 --- a/internal/envtest/statefulset_controller.go +++ b/internal/envtest/statefulset_controller.go @@ -105,15 +105,19 @@ func (r *StatefulSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) } for i := 0; i < intendedReplicas; i++ { - if i <= len(stsPods)-1 { - continue - } - podKey := types.NamespacedName{ Name: fmt.Sprintf("%s-%d", sts.Name, i), Namespace: sts.Namespace, } + if err := r.Client.Get(ctx, podKey, &corev1.Pod{}); err == nil { + // Pod already exists + continue + } else if client.IgnoreNotFound(err) != nil { + logger.Error(err, "Failed to get the Pod") + return ctrl.Result{}, err + } + pod := &corev1.Pod{ // Technically this comes from a combination of Template.ObjectMeta, but we're just adding some fields ObjectMeta: metav1.ObjectMeta{ @@ -124,7 +128,6 @@ func (r *StatefulSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) Spec: sts.Spec.Template.Spec, } - // tbh, why do we need to add this here..? pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{ Name: "server-data", diff --git a/pkg/events/events.go b/pkg/events/events.go index d07d482a..a22cb0dc 100644 --- a/pkg/events/events.go +++ b/pkg/events/events.go @@ -19,6 +19,7 @@ const ( LabeledPodAsSeed string = "LabeledPodAsSeed" LabeledPodAsDecommissioning string = "LabeledPodAsDecommissioning" DeletedPvc string = "DeletedPvc" + ResizingPVC string = "ResizingPVC" UnlabeledPodAsSeed string = "UnlabeledPodAsSeed" LabeledRackResource string = "LabeledRackResource" ScalingUpRack string = "ScalingUpRack" @@ -32,6 +33,8 @@ const ( DecommissionDatacenter string = "DecommissionDatacenter" DecommissioningNode string = "DecommissioningNode" UnhealthyDatacenter string = "UnhealthyDatacenter" + RecreatingStatefulSet string = "RecreatingStatefulSet" + InvalidDatacenterSpec string = "InvalidDatacenterSpec" ) type LoggingEventRecorder struct { diff --git a/pkg/reconciliation/decommission_node.go b/pkg/reconciliation/decommission_node.go index f8da9e7f..de2c5160 100644 --- a/pkg/reconciliation/decommission_node.go +++ b/pkg/reconciliation/decommission_node.go @@ -422,6 +422,7 @@ func (rc *ReconciliationContext) EnsurePodsCanAbsorbDecommData(decommPod *corev1 pod.Name, free, int64(spaceUsedByDecommPod), ) rc.ReqLogger.Error(fmt.Errorf(msg), msg) + rc.Recorder.Eventf(rc.Datacenter, corev1.EventTypeWarning, events.InvalidDatacenterSpec, msg) dcPatch := client.MergeFrom(rc.Datacenter.DeepCopy()) updated := rc.setCondition( diff --git a/pkg/reconciliation/handler.go b/pkg/reconciliation/handler.go index 772c78dd..7c48f632 100644 --- a/pkg/reconciliation/handler.go +++ b/pkg/reconciliation/handler.go @@ -121,6 +121,8 @@ func (rc *ReconciliationContext) IsValid(dc *api.CassandraDatacenter) error { return errs[0] } + 
// TODO Verify if we can expand the PVC or should we reject changes + claim := dc.Spec.StorageConfig.CassandraDataVolumeClaimSpec if claim == nil { err := fmt.Errorf("storageConfig.cassandraDataVolumeClaimSpec is required") diff --git a/pkg/reconciliation/reconcile_datacenter.go b/pkg/reconciliation/reconcile_datacenter.go index 56b9cfb3..cee40f81 100644 --- a/pkg/reconciliation/reconcile_datacenter.go +++ b/pkg/reconciliation/reconcile_datacenter.go @@ -4,9 +4,13 @@ package reconciliation import ( + "context" + "fmt" + "github.com/k8ssandra/cass-operator/internal/result" "github.com/k8ssandra/cass-operator/pkg/events" corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -112,7 +116,7 @@ func (rc *ReconciliationContext) deletePVCs() error { "cassandraDatacenterName", rc.Datacenter.Name, ) - persistentVolumeClaimList, err := rc.listPVCs() + persistentVolumeClaimList, err := rc.listPVCs(rc.Datacenter.GetDatacenterLabels()) if err != nil { if errors.IsNotFound(err) { logger.Info("No PVCs found for CassandraDatacenter") @@ -124,9 +128,9 @@ func (rc *ReconciliationContext) deletePVCs() error { logger.Info( "Found PVCs for cassandraDatacenter", - "numPVCs", len(persistentVolumeClaimList.Items)) + "numPVCs", len(persistentVolumeClaimList)) - for _, pvc := range persistentVolumeClaimList.Items { + for _, pvc := range persistentVolumeClaimList { if err := rc.Client.Delete(rc.Ctx, &pvc); err != nil { logger.Error(err, "Failed to delete PVCs for cassandraDatacenter") return err @@ -140,13 +144,9 @@ func (rc *ReconciliationContext) deletePVCs() error { return nil } -func (rc *ReconciliationContext) listPVCs() (*corev1.PersistentVolumeClaimList, error) { +func (rc *ReconciliationContext) listPVCs(selector map[string]string) ([]corev1.PersistentVolumeClaim, error) { rc.ReqLogger.Info("reconciler::listPVCs") - selector := map[string]string{ - api.DatacenterLabel: api.CleanLabelValue(rc.Datacenter.DatacenterName()), - } - listOptions := &client.ListOptions{ Namespace: rc.Datacenter.Namespace, LabelSelector: labels.SelectorFromSet(selector), @@ -159,5 +159,48 @@ func (rc *ReconciliationContext) listPVCs() (*corev1.PersistentVolumeClaimList, }, } - return persistentVolumeClaimList, rc.Client.List(rc.Ctx, persistentVolumeClaimList, listOptions) + pvcList, err := persistentVolumeClaimList, rc.Client.List(rc.Ctx, persistentVolumeClaimList, listOptions) + if err != nil { + return nil, err + } + + return pvcList.Items, nil +} + +func storageClass(ctx context.Context, c client.Client, storageClassName string) (*storagev1.StorageClass, error) { + if storageClassName == "" { + storageClassList := &storagev1.StorageClassList{} + if err := c.List(ctx, storageClassList, client.MatchingLabels{"storageclass.kubernetes.io/is-default-class": "true"}); err != nil { + return nil, err + } + + if len(storageClassList.Items) > 1 { + return nil, fmt.Errorf("found multiple default storage classes, please specify StorageClassName in the CassandraDatacenter spec") + } else if len(storageClassList.Items) == 0 { + return nil, fmt.Errorf("no default storage class found, please specify StorageClassName in the CassandraDatacenter spec") + } + + return &storageClassList.Items[0], nil + } + + storageClass := &storagev1.StorageClass{} + if err := c.Get(ctx, types.NamespacedName{Name: storageClassName}, storageClass); err != nil { + return nil, err + } + + return storageClass, nil +} + +func (rc 
*ReconciliationContext) storageExpansion() (bool, error) { + storageClassName := rc.Datacenter.Spec.StorageConfig.CassandraDataVolumeClaimSpec.StorageClassName + storageClass, err := storageClass(rc.Ctx, rc.Client, *storageClassName) + if err != nil { + return false, err + } + + if storageClass.AllowVolumeExpansion != nil && *storageClass.AllowVolumeExpansion { + return true, nil + } + + return false, nil } diff --git a/pkg/reconciliation/reconcile_racks.go b/pkg/reconciliation/reconcile_racks.go index 2667706d..c838b102 100644 --- a/pkg/reconciliation/reconcile_racks.go +++ b/pkg/reconciliation/reconcile_racks.go @@ -171,10 +171,149 @@ func (rc *ReconciliationContext) UpdateAllowed() bool { return rc.Datacenter.GenerationChanged() || metav1.HasAnnotation(rc.Datacenter.ObjectMeta, api.UpdateAllowedAnnotation) } +func (rc *ReconciliationContext) CheckPVCResizing() result.ReconcileResult { + rc.ReqLogger.Info("reconcile_racks::CheckPVCResizing") + pvcList, err := rc.listPVCs(rc.Datacenter.GetDatacenterLabels()) + if err != nil { + return result.Error(err) + } + + for _, pvc := range pvcList { + if isPVCResizing(&pvc) { + rc.ReqLogger.Info("Waiting for PVC resize to complete", + "pvc", pvc.Name) + return result.RequeueSoon(10) + } + } + + dcPatch := client.MergeFrom(rc.Datacenter.DeepCopy()) + if updated := rc.setCondition(api.NewDatacenterCondition(api.DatacenterResizingVolumes, corev1.ConditionFalse)); updated { + if err := rc.Client.Status().Patch(rc.Ctx, rc.Datacenter, dcPatch); err != nil { + rc.ReqLogger.Error(err, "error patching datacenter status for updating") + return result.Error(err) + } + } + + return result.Continue() +} + +func isPVCResizing(pvc *corev1.PersistentVolumeClaim) bool { + return isPVCStatusConditionTrue(pvc, corev1.PersistentVolumeClaimResizing) || + isPVCStatusConditionTrue(pvc, corev1.PersistentVolumeClaimFileSystemResizePending) +} + +func isPVCStatusConditionTrue(pvc *corev1.PersistentVolumeClaim, conditionType corev1.PersistentVolumeClaimConditionType) bool { + for _, condition := range pvc.Status.Conditions { + if condition.Type == conditionType && condition.Status == corev1.ConditionTrue { + return true + } + } + return false +} + +func (rc *ReconciliationContext) CheckVolumeClaimSizes(statefulSet, desiredSts *appsv1.StatefulSet) result.ReconcileResult { + rc.ReqLogger.Info("reconcile_racks::CheckVolumeClaims") + + supportsExpansion, err := rc.storageExpansion() + if err != nil { + return result.Error(err) + } + + for i, claim := range statefulSet.Spec.VolumeClaimTemplates { + // Find the desired one + desiredClaim := desiredSts.Spec.VolumeClaimTemplates[i] + if claim.Name != desiredClaim.Name { + return result.Error(fmt.Errorf("statefulSet and desiredSts have different volume claim templates")) + } + + currentSize := claim.Spec.Resources.Requests[corev1.ResourceStorage] + createdSize := desiredClaim.Spec.Resources.Requests[corev1.ResourceStorage] + + // TODO This code is a bit repetitive with all the Status patches. Needs a refactoring in cass-operator since this is a known + // pattern. 
https://github.com/k8ssandra/cass-operator/issues/669 + if currentSize.Cmp(createdSize) > 0 { + dcPatch := client.MergeFrom(rc.Datacenter.DeepCopy()) + if updated := rc.setCondition(api.NewDatacenterCondition(api.DatacenterValid, corev1.ConditionFalse)); updated { + if err := rc.Client.Status().Patch(rc.Ctx, rc.Datacenter, dcPatch); err != nil { + rc.ReqLogger.Error(err, "error patching datacenter status for updating") + return result.Error(err) + } + } + rc.Recorder.Eventf(rc.Datacenter, corev1.EventTypeWarning, events.InvalidDatacenterSpec, "Shrinking CassandraDatacenter PVCs is not supported") + return result.Error(fmt.Errorf("shrinking PVC %s is not supported", claim.Name)) + } + + if currentSize.Cmp(createdSize) < 0 { + rc.ReqLogger.Info("PVC resize request detected", "pvc", claim.Name, "currentSize", currentSize.String(), "createdSize", createdSize.String()) + if !metav1.HasAnnotation(rc.Datacenter.ObjectMeta, api.AllowStorageChangesAnnotation) || rc.Datacenter.Annotations[api.AllowStorageChangesAnnotation] != "true" { + msg := fmt.Sprintf("PVC resize requested, but %s annotation is not set to 'true'", api.AllowStorageChangesAnnotation) + rc.Recorder.Eventf(rc.Datacenter, corev1.EventTypeWarning, events.InvalidDatacenterSpec, msg) + return result.Error(fmt.Errorf(msg)) + } + + if !supportsExpansion { + msg := fmt.Sprintf("PVC resize requested, but StorageClass %s does not support expansion", *claim.Spec.StorageClassName) + rc.Recorder.Eventf(rc.Datacenter, corev1.EventTypeWarning, events.InvalidDatacenterSpec, msg) + dcPatch := client.MergeFrom(rc.Datacenter.DeepCopy()) + if updated := rc.setCondition(api.NewDatacenterCondition(api.DatacenterValid, corev1.ConditionFalse)); updated { + if err := rc.Client.Status().Patch(rc.Ctx, rc.Datacenter, dcPatch); err != nil { + rc.ReqLogger.Error(err, "error patching datacenter status for updating") + return result.Error(err) + } + } + return result.Error(fmt.Errorf(msg)) + } + + dcPatch := client.MergeFrom(rc.Datacenter.DeepCopy()) + if updated := rc.setCondition(api.NewDatacenterCondition(api.DatacenterResizingVolumes, corev1.ConditionTrue)); updated { + if err := rc.Client.Status().Patch(rc.Ctx, rc.Datacenter, dcPatch); err != nil { + rc.ReqLogger.Error(err, "error patching datacenter status for updating") + return result.Error(err) + } + } + + rc.Recorder.Eventf(rc.Datacenter, corev1.EventTypeNormal, events.ResizingPVC, "Resizing PVCs for %s", statefulSet.Name) + + claims, err := rc.listPVCs(claim.Labels) + if err != nil { + return result.Error(err) + } + + pvcNamePrefix := fmt.Sprintf("%s-%s-", claim.Name, statefulSet.Name) + targetPVCs := make([]*corev1.PersistentVolumeClaim, 0) + for _, pvc := range claims { + if strings.HasPrefix(pvc.Name, pvcNamePrefix) { + targetPVCs = append(targetPVCs, &pvc) + } + } + + for _, pvc := range targetPVCs { + if isPVCResizing(pvc) { + return result.RequeueSoon(10) + } + + patch := client.MergeFrom(pvc.DeepCopy()) + pvc.Spec.Resources.Requests[corev1.ResourceStorage] = createdSize + if err := rc.Client.Patch(rc.Ctx, pvc, patch); err != nil { + return result.Error(err) + } + } + + // Update the StatefulSet to reflect the new PVC size + claim.Spec.Resources.Requests[corev1.ResourceStorage] = createdSize + statefulSet.Spec.VolumeClaimTemplates[i] = claim + + return result.Continue() + } + } + + return result.Continue() +} + func (rc *ReconciliationContext) CheckRackPodTemplate() result.ReconcileResult { logger := rc.ReqLogger dc := rc.Datacenter - logger.Info("starting CheckRackPodTemplate()") + 
logger.Info("reconcile_racks::CheckRackPodTemplate") for idx := range rc.desiredRackInformation { rackName := rc.desiredRackInformation[idx].RackName @@ -233,10 +372,11 @@ func (rc *ReconciliationContext) CheckRackPodTemplate() result.ReconcileResult { WithValues("rackName", rackName). Info("update is blocked, but statefulset needs an update. Marking datacenter as requiring update.") dcPatch := client.MergeFrom(dc.DeepCopy()) - rc.setCondition(api.NewDatacenterCondition(api.DatacenterRequiresUpdate, corev1.ConditionTrue)) - if err := rc.Client.Status().Patch(rc.Ctx, dc, dcPatch); err != nil { - logger.Error(err, "error patching datacenter status for updating") - return result.Error(err) + if updated := rc.setCondition(api.NewDatacenterCondition(api.DatacenterRequiresUpdate, corev1.ConditionTrue)); updated { + if err := rc.Client.Status().Patch(rc.Ctx, dc, dcPatch); err != nil { + logger.Error(err, "error patching datacenter status for updating") + return result.Error(err) + } } return result.Continue() } @@ -252,7 +392,11 @@ func (rc *ReconciliationContext) CheckRackPodTemplate() result.ReconcileResult { desiredSts.Annotations = utils.MergeMap(map[string]string{}, statefulSet.Annotations, desiredSts.Annotations) // copy the stuff that can't be updated + if res := rc.CheckVolumeClaimSizes(statefulSet, desiredSts); res.Completed() { + return res + } desiredSts.Spec.VolumeClaimTemplates = statefulSet.Spec.VolumeClaimTemplates + // selector must match podTemplate.Labels, those can't be updated either desiredSts.Spec.Selector = statefulSet.Spec.Selector @@ -290,6 +434,7 @@ func (rc *ReconciliationContext) CheckRackPodTemplate() result.ReconcileResult { statefulSet.SetResourceVersion(resVersion) if err := rc.Client.Update(rc.Ctx, statefulSet); err != nil { if errors.IsInvalid(err) { + rc.Recorder.Eventf(rc.Datacenter, corev1.EventTypeNormal, events.RecreatingStatefulSet, "Recreating statefulset %s", statefulSet.Name) if err = rc.deleteStatefulSet(statefulSet); err != nil { return result.Error(err) } @@ -989,6 +1134,7 @@ func (rc *ReconciliationContext) UpdateCassandraNodeStatus(force bool) error { } } + logger.Info("Setting state", "Running", isMgmtApiRunning(pod), "pod", pod.Name) if pod.Status.PodIP != "" && isMgmtApiRunning(pod) { ip := getRpcAddress(dc, pod) nodeStatus.IP = ip @@ -1949,7 +2095,9 @@ func (rc *ReconciliationContext) hasAdditionalSeeds() bool { func (rc *ReconciliationContext) startNode(pod *corev1.Pod, labelSeedBeforeStart bool, endpointData httphelper.CassMetadataEndpoints) (bool, error) { if pod == nil { return true, nil - } else if !isServerReady(pod) { + } + + if !isServerReady(pod) { if isServerReadyToStart(pod) && isMgmtApiRunning(pod) { // this is the one exception to all seed labelling happening in labelSeedPods() @@ -2435,6 +2583,10 @@ func (rc *ReconciliationContext) ReconcileAllRacks() (reconcile.Result, error) { return recResult.Output() } + if recResult := rc.CheckPVCResizing(); recResult.Completed() { + return recResult.Output() + } + if recResult := rc.CheckRackPodTemplate(); recResult.Completed() { return recResult.Output() } diff --git a/pkg/reconciliation/reconcile_racks_test.go b/pkg/reconciliation/reconcile_racks_test.go index 663f3048..219fc1a6 100644 --- a/pkg/reconciliation/reconcile_racks_test.go +++ b/pkg/reconciliation/reconcile_racks_test.go @@ -29,6 +29,8 @@ import ( "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -407,6 +409,8 @@ func TestCheckRackPodTemplate_TemplateLabels(t *testing.T) { }, } + require.NoError(rc.Client.Update(rc.Ctx, rc.Datacenter)) + desiredStatefulSet, err := newStatefulSetForCassandraDatacenter( nil, "default", @@ -421,10 +425,7 @@ func TestCheckRackPodTemplate_TemplateLabels(t *testing.T) { desiredStatefulSet.Status.ObservedGeneration = 1 desiredStatefulSet.Status.ReadyReplicas = int32(1) - trackObjects := []runtime.Object{ - desiredStatefulSet, - rc.Datacenter, - } + require.NoError(rc.Client.Create(rc.Ctx, desiredStatefulSet)) nextRack := &RackInformation{} nextRack.RackName = "default" @@ -437,7 +438,6 @@ func TestCheckRackPodTemplate_TemplateLabels(t *testing.T) { rc.statefulSets = make([]*appsv1.StatefulSet, len(rackInfo)) rc.statefulSets[0] = desiredStatefulSet - rc.Client = fake.NewClientBuilder().WithStatusSubresource(rc.Datacenter, rc.statefulSets[0]).WithRuntimeObjects(trackObjects...).Build() res := rc.CheckRackPodTemplate() require.Equal(result.Done(), res) rc.statefulSets[0].Status.ObservedGeneration = rc.statefulSets[0].Generation @@ -2447,3 +2447,285 @@ func TestFindHostIdForIpFromEndpointsData(t *testing.T) { assert.Equal(t, tests[i].result.ready, ready, "expected ready to be %v", tests[i].result.ready) } } + +func TestCheckVolumeClaimSizesValidation(t *testing.T) { + rc, _, cleanupMockScr := setupTest() + defer cleanupMockScr() + require := require.New(t) + + // No changes test - should not result in any error + originalStatefulSet, err := newStatefulSetForCassandraDatacenter(nil, "default", rc.Datacenter, 2) + require.NoErrorf(err, "error occurred creating statefulset") + require.NoError(rc.Client.Create(rc.Ctx, originalStatefulSet)) + + res := rc.CheckVolumeClaimSizes(originalStatefulSet, originalStatefulSet) + require.Equal(result.Continue(), res, "No changes, we should continue") + + // Use case, we do not have expansion allowed in our StorageClass, should get Valid False state in CassandraDatacenter + rc.Datacenter.Spec.StorageConfig.CassandraDataVolumeClaimSpec.Resources.Requests = map[corev1.ResourceName]resource.Quantity{corev1.ResourceStorage: resource.MustParse("2Gi")} + require.NoError(rc.Client.Update(rc.Ctx, rc.Datacenter)) + desiredStatefulSet, err := newStatefulSetForCassandraDatacenter(nil, "default", rc.Datacenter, 2) + require.NoErrorf(err, "error occurred creating statefulset") + + res = rc.CheckVolumeClaimSizes(originalStatefulSet, desiredStatefulSet) + require.Equal(result.Error(fmt.Errorf("PVC resize requested, but cassandra.datastax.com/allow-storage-changes annotation is not set to 'true'")), res, "We should have an error, feature flag is not set") + + metav1.SetMetaDataAnnotation(&rc.Datacenter.ObjectMeta, api.AllowStorageChangesAnnotation, "true") + require.NoError(rc.Client.Update(rc.Ctx, rc.Datacenter)) + + res = rc.CheckVolumeClaimSizes(originalStatefulSet, desiredStatefulSet) + require.Equal(result.Error(fmt.Errorf("PVC resize requested, but StorageClass standard does not support expansion")), res, "We should have an error, StorageClass does not allow expansion") + cond, found := rc.Datacenter.GetCondition(api.DatacenterValid) + require.True(found) + require.Equal(corev1.ConditionFalse, cond.Status) + + // Verify we didn't try to shrink either + rc.Datacenter.SetCondition(api.DatacenterCondition{ + Status: corev1.ConditionTrue, + Type: api.DatacenterValid, + }) + + 
rc.Datacenter.Spec.StorageConfig.CassandraDataVolumeClaimSpec.Resources.Requests = map[corev1.ResourceName]resource.Quantity{corev1.ResourceStorage: resource.MustParse("0.5Gi")} + require.NoError(rc.Client.Update(rc.Ctx, rc.Datacenter)) + desiredStatefulSet, err = newStatefulSetForCassandraDatacenter(nil, "default", rc.Datacenter, 2) + require.NoErrorf(err, "error occurred creating statefulset") + res = rc.CheckVolumeClaimSizes(originalStatefulSet, desiredStatefulSet) + require.Equal(result.Error(fmt.Errorf("shrinking PVC %s is not supported", originalStatefulSet.Spec.VolumeClaimTemplates[0].Name)), res, "We should have an error, shrinking is disabled") + cond, found = rc.Datacenter.GetCondition(api.DatacenterValid) + require.True(found) + require.Equal(corev1.ConditionFalse, cond.Status) + + // Verify adding new AdditionalVolumes with size is allowed - even if this doesn't actually add them + rc.Datacenter.Spec.StorageConfig.CassandraDataVolumeClaimSpec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("1Gi") + rc.Datacenter.Spec.StorageConfig.AdditionalVolumes = api.AdditionalVolumesSlice{ + api.AdditionalVolumes{ + MountPath: "/var/log/cassandra", + Name: "server-logs", + PVCSpec: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: ptr.To[string]("standard"), + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{corev1.ResourceStorage: resource.MustParse("384Mi")}, + }, + }, + }, + } + require.NoError(rc.Client.Update(rc.Ctx, rc.Datacenter)) + desiredStatefulSet, err = newStatefulSetForCassandraDatacenter(nil, "default", rc.Datacenter, 2) + require.NoErrorf(err, "error occurred creating statefulset") + res = rc.CheckVolumeClaimSizes(originalStatefulSet, desiredStatefulSet) + require.Equal(result.Continue(), res, "No resize changes, we should continue") + + // Verify adding AdditionalVolumes without sizes are supported + rc.Datacenter.Spec.StorageConfig.AdditionalVolumes = append(rc.Datacenter.Spec.StorageConfig.AdditionalVolumes, + api.AdditionalVolumes{ + MountPath: "/configs/metrics", + Name: "metrics-config", + VolumeSource: &corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "metrics-config-map", + }, + }, + }, + }) + require.NoError(rc.Client.Update(rc.Ctx, rc.Datacenter)) + desiredStatefulSet, err = newStatefulSetForCassandraDatacenter(nil, "default", rc.Datacenter, 2) + require.NoErrorf(err, "error occurred creating statefulset") + res = rc.CheckVolumeClaimSizes(originalStatefulSet, desiredStatefulSet) + require.Equal(result.Continue(), res, "No resize changes, we should continue") +} + +func TestVolumeClaimSizesExpansion(t *testing.T) { + // Verify the StatefulSet is also deleted when the PVC size is changed + rc, _, cleanupMockScr := setupTest() + defer cleanupMockScr() + require := require.New(t) + + // Sanity check, no changes yet - should not result in any error + originalStatefulSet, err := newStatefulSetForCassandraDatacenter(nil, "default", rc.Datacenter, 2) + require.NoErrorf(err, "error occurred creating statefulset") + require.NoError(rc.Client.Create(rc.Ctx, originalStatefulSet)) + + // Create the PVCs for the StatefulSet + for i := 0; i < int(*originalStatefulSet.Spec.Replicas); i++ { + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("server-data-%s-%d", originalStatefulSet.Name, i), + Namespace: "default", + }, + } + pvc.Spec = originalStatefulSet.Spec.VolumeClaimTemplates[0].Spec + 
pvc.Labels = originalStatefulSet.Spec.VolumeClaimTemplates[0].Labels + require.NoError(rc.Client.Create(rc.Ctx, pvc)) + } + + // Mark the StorageClass as allowing expansion and Datacenter to allow expansion + storageClass := &storagev1.StorageClass{} + require.NoError(rc.Client.Get(rc.Ctx, types.NamespacedName{Name: "standard"}, storageClass)) + storageClass.AllowVolumeExpansion = ptr.To[bool](true) + require.NoError(rc.Client.Update(rc.Ctx, storageClass)) + metav1.SetMetaDataAnnotation(&rc.Datacenter.ObjectMeta, api.AllowStorageChangesAnnotation, "true") + require.NoError(rc.Client.Update(rc.Ctx, rc.Datacenter)) + + res := rc.CheckVolumeClaimSizes(originalStatefulSet, originalStatefulSet) + require.Equal(result.Continue(), res, "No changes, we should continue") + + rc.Datacenter.Spec.StorageConfig.CassandraDataVolumeClaimSpec.Resources.Requests = map[corev1.ResourceName]resource.Quantity{corev1.ResourceStorage: resource.MustParse("2Gi")} + require.NoError(rc.Client.Update(rc.Ctx, rc.Datacenter)) + desiredStatefulSet, err := newStatefulSetForCassandraDatacenter(nil, "default", rc.Datacenter, 2) + require.NoErrorf(err, "error occurred creating statefulset") + res = rc.CheckVolumeClaimSizes(originalStatefulSet, desiredStatefulSet) + require.Equal(result.Continue(), res, "We made changes to the PVC size") + + cond, found := rc.Datacenter.GetCondition(api.DatacenterResizingVolumes) + require.True(found) + require.Equal(corev1.ConditionTrue, cond.Status) + + pvcs, err := rc.listPVCs(originalStatefulSet.Spec.VolumeClaimTemplates[0].Labels) + require.NoError(err) + for _, pvc := range pvcs { + require.Equal(*rc.Datacenter.Spec.StorageConfig.CassandraDataVolumeClaimSpec.Resources.Requests.Storage(), pvc.Spec.Resources.Requests[corev1.ResourceStorage], fmt.Sprintf("PVC %s should have been resized", pvc.Name)) + require.Equal(resource.MustParse("2Gi"), pvc.Spec.Resources.Requests[corev1.ResourceStorage], fmt.Sprintf("PVC %s should have been resized to 2Gi", pvc.Name)) + } +} + +func TestCheckPVCResizing(t *testing.T) { + rc, _, cleanupMockScr := setupTest() + defer cleanupMockScr() + require := require.New(t) + + // Create a PVC for the StatefulSet + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "server-data-cassandra-dc1-default-sts-0", + Namespace: "default", + Labels: rc.Datacenter.GetRackLabels("rack1"), + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: ptr.To[string]("standard"), + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + } + require.NoError(rc.Client.Create(rc.Ctx, pvc)) + res := rc.CheckPVCResizing() + require.Equal(result.Continue(), res, "No resizing in progress, we should simply continue") + + pvc.Status.Conditions = []corev1.PersistentVolumeClaimCondition{{ + Type: corev1.PersistentVolumeClaimResizing, + Status: corev1.ConditionTrue, + }} + require.NoError(rc.Client.Status().Update(rc.Ctx, pvc)) + + res = rc.CheckPVCResizing() + require.Equal(result.RequeueSoon(10), res, "PVC resizing is in progress, we should requeue") + + pvc.Status.Conditions = []corev1.PersistentVolumeClaimCondition{{ + Type: corev1.PersistentVolumeClaimResizing, + Status: corev1.ConditionFalse, + }} + require.NoError(rc.Client.Status().Update(rc.Ctx, pvc)) + + // Verify datacenter status resizing is removed if nothing is being resized anymore + 
rc.Datacenter.SetCondition(api.DatacenterCondition{ + Status: corev1.ConditionTrue, + Type: api.DatacenterResizingVolumes, + }) + require.NoError(rc.Client.Status().Update(rc.Ctx, rc.Datacenter)) + + res = rc.CheckPVCResizing() + require.Equal(result.Continue(), res, "No resizing in progress, we should simply continue") + + cond, found := rc.Datacenter.GetCondition(api.DatacenterResizingVolumes) + require.True(found) + require.Equal(corev1.ConditionFalse, cond.Status) + + // Create another PVC, not related to our Datacenter and check it is ignored + pvc2 := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "server-data-cassandra-dc2-default-sts-0", + Namespace: "default", + Labels: rc.Datacenter.GetRackLabels("rack1"), + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: ptr.To[string]("standard"), + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + } + pvc2.Labels[api.DatacenterLabel] = "dc2" + require.NoError(rc.Client.Create(rc.Ctx, pvc2)) + pvc2.Status.Conditions = []corev1.PersistentVolumeClaimCondition{{ + Type: corev1.PersistentVolumeClaimResizing, + Status: corev1.ConditionTrue, + }} + require.NoError(rc.Client.Status().Update(rc.Ctx, pvc2)) + res = rc.CheckPVCResizing() + require.Equal(result.Continue(), res, "No resizing in progress, we should simply continue") +} + +func TestCheckRackPodTemplateWithVolumeExpansion(t *testing.T) { + require := require.New(t) + rc, _, cleanupMockScr := setupTest() + defer cleanupMockScr() + + require.NoError(rc.CalculateRackInformation()) + res := rc.CheckRackCreation() + require.False(res.Completed(), "CheckRackCreation did not complete as expected") + + require.Equal(result.Continue(), rc.CheckRackPodTemplate()) + + metav1.SetMetaDataAnnotation(&rc.Datacenter.ObjectMeta, api.AllowStorageChangesAnnotation, "true") + require.NoError(rc.Client.Update(rc.Ctx, rc.Datacenter)) + + // Get the current StS + sts := &appsv1.StatefulSet{} + nsName := newNamespacedNameForStatefulSet(rc.Datacenter, "default") + require.NoError(rc.Client.Get(rc.Ctx, nsName, sts)) + require.Equal(resource.MustParse("1Gi"), sts.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests[corev1.ResourceStorage]) + + // Create the PVCs for the StatefulSet + for i := 0; i < int(*sts.Spec.Replicas); i++ { + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("server-data-%s-%d", sts.Name, i), + Namespace: "default", + }, + } + pvc.Spec = sts.Spec.VolumeClaimTemplates[0].Spec + pvc.Labels = sts.Spec.VolumeClaimTemplates[0].Labels + require.NoError(rc.Client.Create(rc.Ctx, pvc)) + } + + require.Equal(result.Continue(), rc.CheckRackPodTemplate()) + + rc.Datacenter.Spec.StorageConfig.CassandraDataVolumeClaimSpec.Resources.Requests = map[corev1.ResourceName]resource.Quantity{corev1.ResourceStorage: resource.MustParse("2Gi")} + require.NoError(rc.Client.Update(rc.Ctx, rc.Datacenter)) + res = rc.CheckRackPodTemplate() + require.Equal(result.Error(fmt.Errorf("PVC resize requested, but StorageClass standard does not support expansion")), res, "We should have an error, storageClass does not support expansion") + + // Mark the StorageClass as allowing expansion + storageClass := &storagev1.StorageClass{} + require.NoError(rc.Client.Get(rc.Ctx, types.NamespacedName{Name: "standard"}, storageClass)) + storageClass.AllowVolumeExpansion = 
ptr.To[bool](true) + require.NoError(rc.Client.Update(rc.Ctx, storageClass)) + + res = rc.CheckRackPodTemplate() + require.Equal(result.Done(), res, "Recreating StS should throw us to silence period") + + require.NoError(rc.Client.Get(rc.Ctx, nsName, sts)) + require.Equal(resource.MustParse("2Gi"), sts.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests[corev1.ResourceStorage]) + + // The fakeClient behavior does not prevent us from modifying the StS fields, so this test behaves unlike real world in that sense + res = rc.CheckRackPodTemplate() + require.Equal(result.Continue(), res, "Recreating StS should throw us to silence period") + +} diff --git a/pkg/reconciliation/testing.go b/pkg/reconciliation/testing.go index 35e35215..c53a3bfa 100644 --- a/pkg/reconciliation/testing.go +++ b/pkg/reconciliation/testing.go @@ -17,6 +17,7 @@ import ( "github.com/go-logr/logr" mock "github.com/stretchr/testify/mock" corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -63,10 +64,10 @@ func CreateMockReconciliationContext( ) storageSize := resource.MustParse("1Gi") - storageName := "server-data" + storageClassName := "standard" storageConfig := api.StorageConfig{ CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: &storageName, + StorageClassName: &storageClassName, AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{"storage": storageSize}, @@ -74,6 +75,12 @@ func CreateMockReconciliationContext( }, } + storageClass := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: storageClassName, + }, + } + // Instance a cassandraDatacenter cassandraDatacenter := &api.CassandraDatacenter{ ObjectMeta: metav1.ObjectMeta{ @@ -94,6 +101,7 @@ func CreateMockReconciliationContext( trackObjects := []runtime.Object{ cassandraDatacenter, + storageClass, } s := scheme.Scheme diff --git a/tests/pvc_expansion/pvc_expansion_test.go b/tests/pvc_expansion/pvc_expansion_test.go new file mode 100644 index 00000000..f9168297 --- /dev/null +++ b/tests/pvc_expansion/pvc_expansion_test.go @@ -0,0 +1,99 @@ +package pvc_expansion + +import ( + "fmt" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + + "github.com/k8ssandra/cass-operator/tests/kustomize" + ginkgo_util "github.com/k8ssandra/cass-operator/tests/util/ginkgo" + "github.com/k8ssandra/cass-operator/tests/util/kubectl" +) + +var ( + testName = "PVC Expansion" + namespace = "pvc-expansion" + dcName = "dc1" + dcYaml = "../testdata/default-single-rack-single-node-dc-lvm.yaml" + podName = "cluster1-dc1-r1-sts-0" + dcResource = fmt.Sprintf("CassandraDatacenter/%s", dcName) + ns = ginkgo_util.NewWrapper(testName, namespace) +) + +func TestLifecycle(t *testing.T) { + AfterSuite(func() { + logPath := fmt.Sprintf("%s/aftersuite", ns.LogDir) + err := kubectl.DumpAllLogs(logPath).ExecV() + if err != nil { + t.Logf("Failed to dump all the logs: %v", err) + } + + fmt.Printf("\n\tPost-run logs dumped at: %s\n\n", logPath) + ns.Terminate() + err = kustomize.Undeploy(namespace) + if err != nil { + t.Logf("Failed to undeploy cass-operator: %v", err) + } + }) + + RegisterFailHandler(Fail) + RunSpecs(t, testName) +} + +var _ = Describe(testName, func() { + Context("when in a new cluster", func() { + Specify("operator is installed and cluster is created", func() { + By("deploy cass-operator with kustomize") + err := kustomize.Deploy(namespace) + Expect(err).ToNot(HaveOccurred()) + + ns.WaitForOperatorReady() + + step := "creating a datacenter resource with 1 rack/1 node" + testFile, err := ginkgo_util.CreateTestFile(dcYaml) + Expect(err).ToNot(HaveOccurred()) + + k := kubectl.ApplyFiles(testFile) + ns.ExecAndLog(step, k) + + ns.WaitForDatacenterReady(dcName) + }) + Specify("user is able to expand the existing server-data", func() { + step := "retrieve the persistent volume claim" + json := "jsonpath={.spec.volumes[?(.name=='server-data')].persistentVolumeClaim.claimName}" + k := kubectl.Get("pod", podName).FormatOutput(json) + pvcName := ns.OutputAndLog(step, k) + + step = "find PVC volume" + json = "jsonpath={.spec.volumeName}" + k = kubectl.Get("pvc", pvcName).FormatOutput(json) + pvName := ns.OutputAndLog(step, k) + + step = "find the PV volume size" + json = "jsonpath={.spec.capacity.storage}" + k = kubectl.Get("pv", pvName).FormatOutput(json) + existingPvSize := ns.OutputAndLog(step, k) + Expect(existingPvSize).To(Equal("1Gi"), "Expected PV size to be 1Gi but got %s", existingPvSize) + + step = "patch CassandraDatacenter to increase the StorageConfig size" + patch := fmt.Sprintf(`{"spec":{"storageConfig":{"cassandraDataVolumeClaimSpec":{"resources":{"requests":{"storage":"%s"}}}}}}`, "2Gi") + k = kubectl.PatchMerge(dcResource, patch) + ns.ExecAndLog(step, k) + + ns.WaitForDatacenterCondition(dcName, "ResizingVolumes", string(corev1.ConditionTrue)) + ns.WaitForDatacenterReady(dcName) + ns.WaitForDatacenterCondition(dcName, "ResizingVolumes", string(corev1.ConditionFalse)) + + step = "find the PV volume size" + json = "jsonpath={.spec.capacity.storage}" + k = kubectl.Get("pv", pvName).FormatOutput(json) + pvSize := ns.OutputAndLog(step, k) + + Expect(pvSize).To(Equal("2Gi"), "Expected PV size to be 2Gi but got %s", pvSize) + }) + }) +}) diff --git a/tests/testdata/default-single-rack-single-node-dc-lvm.yaml b/tests/testdata/default-single-rack-single-node-dc-lvm.yaml new file mode 100644 index 00000000..61c51e2e --- /dev/null +++ b/tests/testdata/default-single-rack-single-node-dc-lvm.yaml @@ -0,0 +1,27 @@ +apiVersion: cassandra.datastax.com/v1beta1 +kind: CassandraDatacenter +metadata: + name: dc1 + annotations: + cassandra.datastax.com/allow-storage-changes: "true" +spec: + 
clusterName: cluster1 + serverType: cassandra + serverVersion: "4.1.4" + managementApiAuth: + insecure: {} + size: 1 + storageConfig: + cassandraDataVolumeClaimSpec: + storageClassName: topolvm-provisioner-thin + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + racks: + - name: r1 + config: + jvm-server-options: + initial_heap_size: "512m" + max_heap_size: "512m" diff --git a/tests/util/kubectl/kubectl.go b/tests/util/kubectl/kubectl.go index e5da4e06..5f6f661d 100644 --- a/tests/util/kubectl/kubectl.go +++ b/tests/util/kubectl/kubectl.go @@ -364,15 +364,8 @@ func DumpClusterInfo(path string, namespace string) { dumpCmd := DumpLogs(path, namespace) dumpCmd.ExecVPanic() - // Store the list of pods in an easy to read format. - podWide := Get("pods", "-o", "wide", "-n", namespace).OutputPanic() - storeOutput(path, "pods", "out", podWide) - - describePods := Describe("pods", "-n", namespace).OutputPanic() - storeOutput(path, "pods-describe", "out", describePods) - // Dump all objects that we need to investigate failures as a flat list and as yaml manifests - for _, objectType := range []string{"CassandraDatacenter", "CassandraTask"} { + for _, objectType := range []string{"statefulsets", "pvc", "pv", "pods", "CassandraDatacenter", "CassandraTask"} { // Get the list of objects output, _ := Get(objectType, "-o", "wide", "-n", namespace).Output() storeOutput(path, objectType, "out", output) @@ -381,6 +374,12 @@ func DumpClusterInfo(path string, namespace string) { output, _ = Get(objectType, "-o", "yaml", "-n", namespace).Output() storeOutput(path, objectType, "yaml", output) } + + // For describe information + for _, objectType := range []string{"statefulsets", "pods", "pvc", "pv"} { + output, _ := Describe(objectType, "-n", namespace).Output() + storeOutput(path, objectType, "out", output) + } } func storeOutput(path, objectType, ext, output string) { From 83acde6a26f641cd54fdcf94157c8e7ae8bc19f2 Mon Sep 17 00:00:00 2001 From: Michael Burman Date: Mon, 15 Jul 2024 18:59:57 +0300 Subject: [PATCH 2/2] Remove commented out stuff --- .../cassandradatacenter_controller_test.go | 58 ------------------- pkg/reconciliation/handler.go | 2 - 2 files changed, 60 deletions(-) diff --git a/internal/controllers/cassandra/cassandradatacenter_controller_test.go b/internal/controllers/cassandra/cassandradatacenter_controller_test.go index b0a089dd..2b0fe192 100644 --- a/internal/controllers/cassandra/cassandradatacenter_controller_test.go +++ b/internal/controllers/cassandra/cassandradatacenter_controller_test.go @@ -184,64 +184,6 @@ var _ = Describe("CassandraDatacenter tests", func() { verifyDatacenterDeleted(ctx, dcName).Should(Succeed()) }) }) - // This isn't functional with envtest at the moment, fails with (only in envtest): - /* - 2024-07-04T17:05:07.636+0300 INFO PersistentVolumeClaim "server-data-cluster1-dc12-r0-sts-0" is invalid: spec: Forbidden: spec is immutable after creation except resources.requests and volumeAttributesClassName for bound claims - core.PersistentVolumeClaimSpec{ - AccessModes: {"ReadWriteOnce"}, - Selector: nil, - Resources: core.VolumeResourceRequirements{ - Limits: nil, - - Requests: core.ResourceList{ - - s"storage": {i: resource.int64Amount{value: 1073741824}, s: "1Gi", Format: "BinarySI"}, - - }, - + Requests: core.ResourceList{ - + s"storage": {i: resource.int64Amount{value: 2147483648}, s: "2Gi", Format: "BinarySI"}, - + }, - }, - */ - /* - Context("Single datacenter modifications", func() { - It("should be able to expand PVC", func(ctx 
SpecContext) { - dcName := "dc12" - - dc := createDatacenter(ctx, dcName, 1, 1) - waitForDatacenterReady(ctx, dcName) - createStorageClass(ctx, "default") - - verifyStsCount(ctx, dcName, 1, 1) - verifyPodCount(ctx, dcName, 1) - waitForDatacenterReady(ctx, dcName) - - By("updating the storageSize to 2Gi") - refreshDatacenter(ctx, &dc) - patch := client.MergeFrom(dc.DeepCopy()) - metav1.SetMetaDataAnnotation(&dc.ObjectMeta, "cassandra.datastax.com/allow-storage-changes", "true") - dc.Spec.StorageConfig.CassandraDataVolumeClaimSpec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("2Gi") - Expect(k8sClient.Patch(ctx, &dc, patch)).To(Succeed()) - // Expect(k8sClient.Update(ctx, &dc)).To(Succeed()) - - waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterResizingVolumes, corev1.ConditionTrue).Should(Succeed()) - waitForDatacenterReady(ctx, dcName) - waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterResizingVolumes, corev1.ConditionFalse).Should(Succeed()) - - // Verify the StS was updated - verifyStsCount(ctx, dcName, 1, 1) - stsAll := &appsv1.StatefulSetList{} - - Expect(k8sClient.List(ctx, stsAll, client.MatchingLabels{cassdcapi.DatacenterLabel: dcName}, client.InNamespace(testNamespaceName))).To(Succeed()) - Expect(len(stsAll.Items)).To(Equal(1)) - - for _, sts := range stsAll.Items { - claimSize := sts.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests[corev1.ResourceStorage] - Expect("2Gi").To(Equal(claimSize.String())) - } - - deleteDatacenter(ctx, dcName) - verifyDatacenterDeleted(ctx, dcName) - }) - }) - */ }) }) diff --git a/pkg/reconciliation/handler.go b/pkg/reconciliation/handler.go index 7c48f632..772c78dd 100644 --- a/pkg/reconciliation/handler.go +++ b/pkg/reconciliation/handler.go @@ -121,8 +121,6 @@ func (rc *ReconciliationContext) IsValid(dc *api.CassandraDatacenter) error { return errs[0] } - // TODO Verify if we can expand the PVC or should we reject changes - claim := dc.Spec.StorageConfig.CassandraDataVolumeClaimSpec if claim == nil { err := fmt.Errorf("storageConfig.cassandraDataVolumeClaimSpec is required")
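
A brief usage sketch (illustrative only, not part of the applied diff): the flow below mirrors tests/pvc_expansion/pvc_expansion_test.go and the new testdata manifest; the namespace, datacenter name, StorageClass and the 2Gi target are taken from those test files and stand in for whatever the user's cluster actually runs.

# The PVC's StorageClass must allow expansion (checked by storageExpansion()).
kubectl get storageclass topolvm-provisioner-thin -o jsonpath='{.allowVolumeExpansion}'

# Opt in to storage changes with the annotation checked in CheckVolumeClaimSizes().
kubectl annotate cassandradatacenter/dc1 -n pvc-expansion cassandra.datastax.com/allow-storage-changes=true

# Request the larger size; the operator patches the matching PVCs, keeps the
# ResizingVolumes condition True while the resize is in progress, and carries
# the new size into the StatefulSet's volumeClaimTemplates.
kubectl patch cassandradatacenter/dc1 -n pvc-expansion --type merge \
  -p '{"spec":{"storageConfig":{"cassandraDataVolumeClaimSpec":{"resources":{"requests":{"storage":"2Gi"}}}}}}'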