diff --git a/apis/cassandra/v1beta1/cassandradatacenter_types.go b/apis/cassandra/v1beta1/cassandradatacenter_types.go
index 351430b0..24558a03 100644
--- a/apis/cassandra/v1beta1/cassandradatacenter_types.go
+++ b/apis/cassandra/v1beta1/cassandradatacenter_types.go
@@ -261,8 +261,7 @@ type CassandraDatacenterSpec struct {
 	// CDC allows configuration of the change data capture agent which can run within the Management API container. Use it to send data to Pulsar.
 	CDC *CDCConfiguration `json:"cdc,omitempty"`
 
-	// DatacenterName allows to override the name of the Cassandra datacenter. Kubernetes objects will be named after a sanitized version of it if set, and if not metadata.name. In Cassandra the DC name will be overridden by this value.
-	// It may generate some confusion as objects created for the DC will have a different name than the CasandraDatacenter object itself.
+	// DatacenterName allows overriding the name of the Cassandra datacenter. In Cassandra the DC name will be overridden by this value.
 	// This setting can create conflicts if multiple DCs coexist in the same namespace if metadata.name for a DC with no override is set to the same value as the override name of another DC.
 	// Use cautiously.
 	// +optional
@@ -488,6 +487,9 @@ type CassandraDatacenterStatus struct {
 	// This field is used to perform validation checks preventing a user from changing the override
 	// +optional
 	DatacenterName *string `json:"datacenterName,omitempty"`
+
+	// +optional
+	MetadataVersion int64 `json:"metadataVersion,omitempty"`
 }
 
 // CassandraDatacenter is the Schema for the cassandradatacenters API
@@ -599,7 +601,7 @@ func (dc *CassandraDatacenter) SetCondition(condition DatacenterCondition) {
 // GetDatacenterLabels ...
 func (dc *CassandraDatacenter) GetDatacenterLabels() map[string]string {
 	labels := dc.GetClusterLabels()
-	labels[DatacenterLabel] = CleanLabelValue(dc.DatacenterName())
+	labels[DatacenterLabel] = CleanLabelValue(dc.Name)
 	return labels
 }
 
@@ -664,19 +666,19 @@ func (dc *CassandraDatacenter) GetSeedServiceName() string {
 }
 
 func (dc *CassandraDatacenter) GetAdditionalSeedsServiceName() string {
-	return CleanupForKubernetes(dc.Spec.ClusterName) + "-" + dc.SanitizedName() + "-additional-seed-service"
+	return CleanupForKubernetes(dc.Spec.ClusterName) + "-" + dc.LabelResourceName() + "-additional-seed-service"
 }
 
 func (dc *CassandraDatacenter) GetAllPodsServiceName() string {
-	return CleanupForKubernetes(dc.Spec.ClusterName) + "-" + dc.SanitizedName() + "-all-pods-service"
+	return CleanupForKubernetes(dc.Spec.ClusterName) + "-" + dc.LabelResourceName() + "-all-pods-service"
 }
 
 func (dc *CassandraDatacenter) GetDatacenterServiceName() string {
-	return CleanupForKubernetes(dc.Spec.ClusterName) + "-" + dc.SanitizedName() + "-service"
+	return CleanupForKubernetes(dc.Spec.ClusterName) + "-" + dc.LabelResourceName() + "-service"
 }
 
 func (dc *CassandraDatacenter) GetNodePortServiceName() string {
-	return CleanupForKubernetes(dc.Spec.ClusterName) + "-" + dc.SanitizedName() + "-node-port-service"
+	return CleanupForKubernetes(dc.Spec.ClusterName) + "-" + dc.LabelResourceName() + "-node-port-service"
 }
 
 func (dc *CassandraDatacenter) ShouldGenerateSuperuserSecret() bool {
@@ -973,9 +975,17 @@ func SplitRacks(nodeCount, rackCount int) []int {
 	return topology
 }
 
-// SanitizedName returns a sanitized version of the name returned by DatacenterName()
-func (dc *CassandraDatacenter) SanitizedName() string {
-	return CleanupForKubernetes(dc.DatacenterName())
+func (dc *CassandraDatacenter) DatacenterNameStatus() bool {
+	return dc.Status.DatacenterName != nil
+}
+
+// LabelResourceName returns the sanitized datacenter name used for Kubernetes objects and labels: the name recorded in the status if one exists, otherwise metadata.name
+func (dc *CassandraDatacenter) LabelResourceName() string {
+	// An existing cluster keeps the name recorded in its status; a new cluster uses dc.Name
+	if dc.DatacenterNameStatus() {
+		return CleanupForKubernetes(*dc.Status.DatacenterName)
+	}
+	return CleanupForKubernetes(dc.Name)
 }
 
 // DatacenterName returns the Cassandra DC name override if it exists,
diff --git a/apis/cassandra/v1beta1/cassandradatacenter_types_test.go b/apis/cassandra/v1beta1/cassandradatacenter_types_test.go
index ed339317..b3380fa0 100644
--- a/apis/cassandra/v1beta1/cassandradatacenter_types_test.go
+++ b/apis/cassandra/v1beta1/cassandradatacenter_types_test.go
@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"testing"
 
+	"github.com/Jeffail/gabs/v2"
 	"github.com/stretchr/testify/assert"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/ptr"
@@ -190,3 +191,50 @@ func TestUseClientImageEnforce(t *testing.T) {
 		assert.True(dc.UseClientImage())
 	}
 }
+
+func TestDatacenterNoOverrideConfig(t *testing.T) {
+	assert := assert.New(t)
+	dc := CassandraDatacenter{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "dc1",
+		},
+		Spec: CassandraDatacenterSpec{
+			ClusterName: "cluster1",
+		},
+	}
+
+	config, err := dc.GetConfigAsJSON(dc.Spec.Config)
+	assert.NoError(err)
+
+	container, err := gabs.ParseJSON([]byte(config))
+	assert.NoError(err)
+
+	dataCenterInfo := container.ChildrenMap()["datacenter-info"]
+	assert.NotEmpty(dataCenterInfo)
+	assert.Equal(dc.Name, dataCenterInfo.ChildrenMap()["name"].Data().(string))
+	assert.Equal(dc.DatacenterName(), dc.Name)
+}
+
+func TestDatacenterOverrideInConfig(t *testing.T) {
+	assert := assert.New(t)
+	dc := CassandraDatacenter{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "dc1",
+		},
+		Spec: CassandraDatacenterSpec{
+			ClusterName:    "cluster1",
+			DatacenterName: "Home_Dc",
+		},
+	}
+
+	config, err := dc.GetConfigAsJSON(dc.Spec.Config)
+	assert.NoError(err)
+
+	container, err := gabs.ParseJSON([]byte(config))
+	assert.NoError(err)
+
+	dataCenterInfo := container.ChildrenMap()["datacenter-info"]
+	assert.NotEmpty(dataCenterInfo)
+	assert.Equal(dc.Spec.DatacenterName, dataCenterInfo.ChildrenMap()["name"].Data().(string))
+	assert.Equal(dc.DatacenterName(), dc.Spec.DatacenterName)
+}
diff --git a/config/crd/bases/cassandra.datastax.com_cassandradatacenters.yaml b/config/crd/bases/cassandra.datastax.com_cassandradatacenters.yaml
index 2d7a9493..6ae1bd4b 100644
--- a/config/crd/bases/cassandra.datastax.com_cassandradatacenters.yaml
+++ b/config/crd/bases/cassandra.datastax.com_cassandradatacenters.yaml
@@ -300,8 +300,7 @@ spec:
                 type: string
               datacenterName:
                 description: |-
-                  DatacenterName allows to override the name of the Cassandra datacenter. Kubernetes objects will be named after a sanitized version of it if set, and if not metadata.name. In Cassandra the DC name will be overridden by this value.
-                  It may generate some confusion as objects created for the DC will have a different name than the CasandraDatacenter object itself.
+                  DatacenterName allows overriding the name of the Cassandra datacenter. In Cassandra the DC name will be overridden by this value.
                   This setting can create conflicts if multiple DCs coexist in the same namespace if metadata.name for a DC with no override is set to the same value as the override name of another DC.
                   Use cautiously.
                 type: string
@@ -11221,6 +11220,9 @@ spec:
                     with the management API
                 format: date-time
                 type: string
+              metadataVersion:
+                format: int64
+                type: integer
               nodeReplacements:
                 items:
                   type: string
diff --git a/config/manager/image_config.yaml b/config/manager/image_config.yaml
index 04aed7f0..2e85d348 100644
--- a/config/manager/image_config.yaml
+++ b/config/manager/image_config.yaml
@@ -18,7 +18,7 @@ defaults:
   # Note, postfix is ignored if repository is not set
   cassandra:
     repository: "k8ssandra/cass-management-api"
-    suffix: "-ubi8"
+    suffix: "-ubi"
   dse:
     repository: "datastax/dse-mgmtapi-6_8"
     suffix: "-ubi8"
diff --git a/internal/controllers/cassandra/cassandradatacenter_controller_test.go b/internal/controllers/cassandra/cassandradatacenter_controller_test.go
index 2b0fe192..f172a7c2 100644
--- a/internal/controllers/cassandra/cassandradatacenter_controller_test.go
+++ b/internal/controllers/cassandra/cassandradatacenter_controller_test.go
@@ -155,8 +155,9 @@ var _ = Describe("CassandraDatacenter tests", func() {
 			refreshDatacenter(ctx, &dc)
 
 			By("Updating the size to 3")
+			patch := client.MergeFrom(dc.DeepCopy())
 			dc.Spec.Size = 3
-			Expect(k8sClient.Update(ctx, &dc)).To(Succeed())
+			Expect(k8sClient.Patch(ctx, &dc, patch)).To(Succeed())
 
 			waitForDatacenterCondition(ctx, dcName, cassdcapi.DatacenterScalingUp, corev1.ConditionTrue).Should(Succeed())
 			waitForDatacenterProgress(ctx, dcName, cassdcapi.ProgressUpdating).Should(Succeed())
diff --git a/internal/controllers/control/cassandratask_controller.go b/internal/controllers/control/cassandratask_controller.go
index 6aaa5627..86a7a744 100644
--- a/internal/controllers/control/cassandratask_controller.go
+++ b/internal/controllers/control/cassandratask_controller.go
@@ -202,7 +202,7 @@ func (r *CassandraTaskReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 		return ctrl.Result{}, errors.Wrapf(err, "unable to fetch target CassandraDatacenter: %s", cassTask.Spec.Datacenter)
 	}
 
-	logger = log.FromContext(ctx, "datacenterName", dc.SanitizedName(), "clusterName", dc.Spec.ClusterName)
+	logger = log.FromContext(ctx, "datacenterName", dc.LabelResourceName(), "clusterName", dc.Spec.ClusterName)
 	log.IntoContext(ctx, logger)
 
 	// If we're active, we can proceed - otherwise verify if we're allowed to run
diff --git a/pkg/images/images_test.go b/pkg/images/images_test.go
index 13582e19..07eb9a69 100644
--- a/pkg/images/images_test.go
+++ b/pkg/images/images_test.go
@@ -94,7 +94,7 @@ func TestDefaultImageConfigParsing(t *testing.T) {
 
 	path, err = GetCassandraImage("cassandra", "4.1.4")
 	assert.NoError(err)
-	assert.Equal("k8ssandra/cass-management-api:4.1.4-ubi8", path)
+	assert.Equal("k8ssandra/cass-management-api:4.1.4-ubi", path)
 }
 
 func TestImageConfigParsing(t *testing.T) {
diff --git a/pkg/reconciliation/construct_podtemplatespec.go b/pkg/reconciliation/construct_podtemplatespec.go
index 4caa80ed..38ea804a 100644
--- a/pkg/reconciliation/construct_podtemplatespec.go
+++ b/pkg/reconciliation/construct_podtemplatespec.go
@@ -379,7 +379,7 @@ func addVolumes(dc *api.CassandraDatacenter, baseTemplate *corev1.PodTemplateSpe
 		Name: "encryption-cred-storage",
 		VolumeSource: corev1.VolumeSource{
 			Secret: &corev1.SecretVolumeSource{
-				SecretName: fmt.Sprintf("%s-keystore", dc.SanitizedName()),
+				SecretName: fmt.Sprintf("%s-keystore", dc.LabelResourceName()),
 			},
 		},
 	}
@@ -614,7 +614,7 @@ func getConfigDataEnVars(dc *api.CassandraDatacenter) ([]corev1.EnvVar, error) {
 			return envVars, nil
 		}
 
-		return nil, fmt.Errorf("datacenter %s is missing %s annotation", dc.SanitizedName(), api.ConfigHashAnnotation)
+		return nil, fmt.Errorf("datacenter %s is missing %s annotation", dc.LabelResourceName(), api.ConfigHashAnnotation)
 	}
 
 	configData, err := dc.GetConfigAsJSON(dc.Spec.Config)
diff --git a/pkg/reconciliation/construct_podtemplatespec_test.go b/pkg/reconciliation/construct_podtemplatespec_test.go
index 43ef4265..47824b96 100644
--- a/pkg/reconciliation/construct_podtemplatespec_test.go
+++ b/pkg/reconciliation/construct_podtemplatespec_test.go
@@ -1548,7 +1548,7 @@ func Test_makeImage(t *testing.T) {
 				serverType:    "cassandra",
 				serverVersion: "3.11.10",
 			},
-			want:      "localhost:5000/k8ssandra/cass-management-api:3.11.10-ubi8",
+			want:      "localhost:5000/k8ssandra/cass-management-api:3.11.10-ubi",
 			errString: "",
 		},
 		{
diff --git a/pkg/reconciliation/construct_statefulset.go b/pkg/reconciliation/construct_statefulset.go
index f9227710..a8ed4fbc 100644
--- a/pkg/reconciliation/construct_statefulset.go
+++ b/pkg/reconciliation/construct_statefulset.go
@@ -26,7 +26,7 @@ func newNamespacedNameForStatefulSet(
 	dc *api.CassandraDatacenter,
 	rackName string) types.NamespacedName {
 
-	name := api.CleanupForKubernetes(dc.Spec.ClusterName) + "-" + dc.SanitizedName() + "-" + api.CleanupSubdomain(rackName) + "-sts"
+	name := api.CleanupForKubernetes(dc.Spec.ClusterName) + "-" + dc.LabelResourceName() + "-" + api.CleanupSubdomain(rackName) + "-sts"
 	ns := dc.Namespace
 
 	return types.NamespacedName{
diff --git a/pkg/reconciliation/construct_statefulset_test.go b/pkg/reconciliation/construct_statefulset_test.go
index a0530b9e..f53983c1 100644
--- a/pkg/reconciliation/construct_statefulset_test.go
+++ b/pkg/reconciliation/construct_statefulset_test.go
@@ -641,7 +641,7 @@ func Test_newStatefulSetForCassandraDatacenter_dcNameOverride(t *testing.T) {
 		oplabels.NameLabel:      oplabels.NameLabelValue,
 		oplabels.CreatedByLabel: oplabels.CreatedByLabelValue,
 		oplabels.VersionLabel:   "4.0.1",
-		api.DatacenterLabel:     "MySuperDC",
+		api.DatacenterLabel:     "dc1",
 		api.ClusterLabel:        "piclem",
 		api.RackLabel:           dc.Spec.Racks[0].Name,
 	}
@@ -652,7 +652,7 @@ func Test_newStatefulSetForCassandraDatacenter_dcNameOverride(t *testing.T) {
 		oplabels.NameLabel:      oplabels.NameLabelValue,
 		oplabels.CreatedByLabel: oplabels.CreatedByLabelValue,
 		oplabels.VersionLabel:   "4.0.1",
-		api.DatacenterLabel:     "MySuperDC",
+		api.DatacenterLabel:     "dc1",
 		api.ClusterLabel:        "piclem",
 		api.RackLabel:           dc.Spec.Racks[0].Name,
 		api.CassNodeState:       stateReadyToStart,
diff --git a/pkg/reconciliation/constructor.go b/pkg/reconciliation/constructor.go
index edb29ab2..dd06171b 100644
--- a/pkg/reconciliation/constructor.go
+++ b/pkg/reconciliation/constructor.go
@@ -31,7 +31,7 @@ func newPodDisruptionBudgetForDatacenter(dc *api.CassandraDatacenter) *policyv1.
 	pdb := &policyv1.PodDisruptionBudget{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:        dc.SanitizedName() + "-pdb",
+			Name:        dc.LabelResourceName() + "-pdb",
 			Namespace:   dc.Namespace,
 			Labels:      labels,
 			Annotations: anns,
@@ -62,8 +62,11 @@ func setOperatorProgressStatus(rc *ReconciliationContext, newState api.ProgressS
 	rc.Datacenter.Status.CassandraOperatorProgress = newState
 
 	if newState == api.ProgressReady {
+		if rc.Datacenter.Status.MetadataVersion < 1 {
+			rc.Datacenter.Status.MetadataVersion = 1
+		}
 		if rc.Datacenter.Status.DatacenterName == nil {
-			rc.Datacenter.Status.DatacenterName = &rc.Datacenter.Spec.DatacenterName
+			rc.Datacenter.Status.DatacenterName = &rc.Datacenter.Name
 		}
 	}
 	if err := rc.Client.Status().Patch(rc.Ctx, rc.Datacenter, patch); err != nil {
@@ -73,17 +76,6 @@ func setOperatorProgressStatus(rc *ReconciliationContext, newState api.ProgressS
 
 	monitoring.UpdateOperatorDatacenterProgressStatusMetric(rc.Datacenter, newState)
 
-	// The allow-upgrade=once annotation is temporary and should be removed after first successful reconcile
-	if metav1.HasAnnotation(rc.Datacenter.ObjectMeta, api.UpdateAllowedAnnotation) && rc.Datacenter.Annotations[api.UpdateAllowedAnnotation] == string(api.AllowUpdateOnce) {
-		// remove the annotation
-		patch = client.MergeFrom(rc.Datacenter.DeepCopy())
-		delete(rc.Datacenter.ObjectMeta.Annotations, api.UpdateAllowedAnnotation)
-		if err := rc.Client.Patch(rc.Ctx, rc.Datacenter, patch); err != nil {
-			rc.ReqLogger.Error(err, "error removing the allow-upgrade=once annotation")
-			return err
-		}
-	}
-
 	return nil
 }
 
@@ -101,5 +93,16 @@ func setDatacenterStatus(rc *ReconciliationContext) error {
 		return err
 	}
 
+	// The allow-upgrade=once annotation is temporary and should be removed after first successful reconcile
+	if metav1.HasAnnotation(rc.Datacenter.ObjectMeta, api.UpdateAllowedAnnotation) && rc.Datacenter.Annotations[api.UpdateAllowedAnnotation] == string(api.AllowUpdateOnce) {
+		// remove the annotation
+		patch := client.MergeFrom(rc.Datacenter.DeepCopy())
+		delete(rc.Datacenter.ObjectMeta.Annotations, api.UpdateAllowedAnnotation)
+		if err := rc.Client.Patch(rc.Ctx, rc.Datacenter, patch); err != nil {
+			rc.ReqLogger.Error(err, "error removing the allow-upgrade=once annotation")
+			return err
+		}
+	}
+
 	return nil
 }
diff --git a/pkg/reconciliation/context.go b/pkg/reconciliation/context.go
index c3e7dbb0..1069103b 100644
--- a/pkg/reconciliation/context.go
+++ b/pkg/reconciliation/context.go
@@ -92,7 +92,7 @@ func CreateReconciliationContext(
 	}
 
 	rc.ReqLogger = rc.ReqLogger.
-		WithValues("datacenterName", dc.SanitizedName()).
+		WithValues("datacenterName", dc.LabelResourceName()).
 		WithValues("clusterName", dc.Spec.ClusterName)
 
 	log.IntoContext(ctx, rc.ReqLogger)
@@ -146,8 +146,8 @@ func (rc *ReconciliationContext) validateDatacenterNameConflicts() []error {
 		errs = append(errs, fmt.Errorf("failed to list CassandraDatacenters in namespace %s: %w", dc.Namespace, err))
 	} else {
 		for _, existingDc := range cassandraDatacenters.Items {
-			if existingDc.SanitizedName() == dc.SanitizedName() && existingDc.Name != dc.Name {
-				errs = append(errs, fmt.Errorf("datacenter name/override %s/%s is already in use by CassandraDatacenter %s/%s", dc.Name, dc.SanitizedName(), existingDc.Name, existingDc.SanitizedName()))
+			if existingDc.LabelResourceName() == dc.LabelResourceName() && existingDc.Name != dc.Name {
+				errs = append(errs, fmt.Errorf("datacenter name/override %s/%s is already in use by CassandraDatacenter %s/%s", dc.Name, dc.LabelResourceName(), existingDc.Name, existingDc.LabelResourceName()))
 			}
 		}
 	}
@@ -164,7 +164,7 @@ func (rc *ReconciliationContext) validateDatacenterNameOverride() []error {
 		return errs
 	} else {
 		if *dc.Status.DatacenterName != dc.Spec.DatacenterName {
-			errs = append(errs, fmt.Errorf("datacenter %s name override '%s' cannot be changed after creation to '%s'.", dc.Name, dc.Spec.DatacenterName, *dc.Status.DatacenterName))
+			errs = append(errs, fmt.Errorf("datacenter %s name override '%s' cannot be changed after creation to '%s'", dc.Name, dc.Spec.DatacenterName, *dc.Status.DatacenterName))
 		}
 	}
 
diff --git a/pkg/reconciliation/handler_reconcile_test.go b/pkg/reconciliation/handler_reconcile_test.go
index 5d995235..1b66984d 100644
--- a/pkg/reconciliation/handler_reconcile_test.go
+++ b/pkg/reconciliation/handler_reconcile_test.go
@@ -19,6 +19,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/tools/record"
+	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -26,7 +27,7 @@ import (
 
 func TestReconcile(t *testing.T) {
 	var (
-		name            = "cluster-example-cluster"
+		name            = "dc1-example"
 		namespace       = "default"
 		size      int32 = 2
 	)
@@ -74,6 +75,7 @@ func TestReconcile(t *testing.T) {
 		Client:   fakeClient,
 		Scheme:   s,
 		Recorder: record.NewFakeRecorder(100),
+		Log:      ctrl.Log.WithName("controllers").WithName("CassandraDatacenter"),
 	}
 
 	request := reconcile.Request{
@@ -88,8 +90,8 @@ func TestReconcile(t *testing.T) {
 		t.Fatalf("Reconciliation Failure: (%v)", err)
 	}
 
-	if result != (reconcile.Result{Requeue: true, RequeueAfter: 2 * time.Second}) {
-		t.Error("Reconcile did not return a correct result.")
+	if result != (reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}) {
+		t.Errorf("Reconcile did not return a correct result. (%v)", result)
 	}
 }
diff --git a/pkg/reconciliation/handler_test.go b/pkg/reconciliation/handler_test.go
index d32a2f3b..f8bcb60c 100644
--- a/pkg/reconciliation/handler_test.go
+++ b/pkg/reconciliation/handler_test.go
@@ -220,7 +220,11 @@ func TestConflictingDcNameOverride(t *testing.T) {
 			Spec: api.CassandraDatacenterSpec{
 				ClusterName:    "cluster1",
 				DatacenterName: "CassandraDatacenter_example",
-			}}}
+			},
+			Status: api.CassandraDatacenterStatus{
+				DatacenterName: ptr.To[string]("CassandraDatacenter_example"),
+			},
+		}}
 	})
 
 	errs := rc.validateDatacenterNameConflicts()
diff --git a/pkg/reconciliation/reconcile_configsecret.go b/pkg/reconciliation/reconcile_configsecret.go
index 8ce3625c..2c9bfa01 100644
--- a/pkg/reconciliation/reconcile_configsecret.go
+++ b/pkg/reconciliation/reconcile_configsecret.go
@@ -131,7 +131,7 @@ func getConfigFromConfigSecret(dc *api.CassandraDatacenter, secret *corev1.Secre
 
 // getDatacenterConfigSecretName The format is clusterName-dcName-config
 func getDatacenterConfigSecretName(dc *api.CassandraDatacenter) string {
-	return api.CleanupForKubernetes(dc.Spec.ClusterName) + "-" + dc.SanitizedName() + "-config"
+	return api.CleanupForKubernetes(dc.Spec.ClusterName) + "-" + dc.LabelResourceName() + "-config"
 }
 
 // getDatacenterConfigSecret Fetches the secret from the api server or creates a new secret
diff --git a/pkg/reconciliation/reconcile_datacenter.go b/pkg/reconciliation/reconcile_datacenter.go
index f55f67ae..ec8c11e8 100644
--- a/pkg/reconciliation/reconcile_datacenter.go
+++ b/pkg/reconciliation/reconcile_datacenter.go
@@ -50,13 +50,12 @@ func (rc *ReconciliationContext) ProcessDeletion() result.ReconcileResult {
 	}
 
 	if _, found := rc.Datacenter.Annotations[api.DecommissionOnDeleteAnnotation]; found {
-		podList, err := rc.listPods(rc.Datacenter.GetDatacenterLabels())
+		dcPods, err := rc.listPods(rc.Datacenter.GetDatacenterLabels())
 		if err != nil {
 			rc.ReqLogger.Error(err, "Failed to list pods, unable to proceed with deletion")
 			return result.Error(err)
 		}
-		dcPods := PodPtrsFromPodList(podList)
-		if len(podList.Items) > 0 {
+		if len(dcPods) > 0 {
 			rc.ReqLogger.V(1).Info("Deletion is being processed by the decommission check")
 			dcs, err := rc.getClusterDatacenters(dcPods)
 			if err != nil {
diff --git a/pkg/reconciliation/reconcile_fql.go b/pkg/reconciliation/reconcile_fql.go
index ee3a1143..f6e20ad1 100644
--- a/pkg/reconciliation/reconcile_fql.go
+++ b/pkg/reconciliation/reconcile_fql.go
@@ -25,7 +25,7 @@ func (rc *ReconciliationContext) CheckFullQueryLogging() result.ReconcileResult
 		rc.ReqLogger.Error(err, "error listing all pods in the cluster to progress full query logging reconciliation")
 		return result.RequeueSoon(2)
 	}
-	for _, podPtr := range PodPtrsFromPodList(podList) {
+	for _, podPtr := range podList {
 		features, err := rc.NodeMgmtClient.FeatureSet(podPtr)
 		if err != nil {
 			rc.ReqLogger.Error(err, "failed to verify featureset for FQL support")
diff --git a/pkg/reconciliation/reconcile_racks.go b/pkg/reconciliation/reconcile_racks.go
index 2d79cb1e..d40012b2 100644
--- a/pkg/reconciliation/reconcile_racks.go
+++ b/pkg/reconciliation/reconcile_racks.go
@@ -34,10 +34,9 @@ import (
 )
 
 var (
-	ResultShouldNotRequeue     reconcile.Result = reconcile.Result{Requeue: false}
-	ResultShouldRequeueNow     reconcile.Result = reconcile.Result{Requeue: true}
-	ResultShouldRequeueSoon    reconcile.Result = reconcile.Result{Requeue: true, RequeueAfter: 2 * time.Second}
-	ResultShouldRequeueTenSecs reconcile.Result = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}
+	ResultShouldNotRequeue  reconcile.Result = reconcile.Result{Requeue: false}
+	ResultShouldRequeueNow  reconcile.Result = reconcile.Result{Requeue: true}
+	ResultShouldRequeueSoon reconcile.Result = reconcile.Result{Requeue: true, RequeueAfter: 2 * time.Second}
 
 	QuietDurationFunc func(int) time.Duration = func(secs int) time.Duration { return time.Duration(secs) * time.Second }
 )
@@ -618,7 +617,7 @@ func (rc *ReconciliationContext) CheckRackStoppedState() result.ReconcileResult
 		emittedStoppingEvent = true
 	}
 
-	rackPods := FilterPodListByLabels(rc.dcPods, rc.Datacenter.GetRackLabels(rackInfo.RackName))
+	rackPods := rc.rackPods(rackInfo.RackName)
 
 	nodesDrained := 0
 	nodeDrainErrors := 0
@@ -751,7 +750,7 @@ func (rc *ReconciliationContext) CheckPodsReady(endpointData httphelper.CassMeta
 		return result.Error(err)
 	}
 	if atLeastOneFirstNodeNotReady {
-		return result.RequeueSoon(2)
+		return result.RequeueSoon(10)
 	}
 
 	// step 3 - if the cluster isn't healthy, that's ok, but go back to step 1
@@ -1432,8 +1431,8 @@ func (rc *ReconciliationContext) isClusterHealthy() bool {
 func (rc *ReconciliationContext) labelSeedPods(rackInfo *RackInformation) (int, error) {
 	logger := rc.ReqLogger.WithName("labelSeedPods")
 
-	rackLabels := rc.Datacenter.GetRackLabels(rackInfo.RackName)
-	rackPods := FilterPodListByLabels(rc.dcPods, rackLabels)
+	rackPods := rc.rackPods(rackInfo.RackName)
+
 	sort.SliceStable(rackPods, func(i, j int) bool {
 		return rackPods[i].Name < rackPods[j].Name
 	})
@@ -2150,7 +2149,7 @@ func (rc *ReconciliationContext) refreshSeeds() error {
 	return nil
 }
 
-func (rc *ReconciliationContext) listPods(selector map[string]string) (*corev1.PodList, error) {
+func (rc *ReconciliationContext) listPods(selector map[string]string) ([]*corev1.Pod, error) {
 	rc.ReqLogger.Info("reconcile_racks::listPods")
 
 	listOptions := &client.ListOptions{
@@ -2165,7 +2164,11 @@ func (rc *ReconciliationContext) listPods(selector map[string]string) (*corev1.P
 		},
 	}
 
-	return podList, rc.Client.List(rc.Ctx, podList, listOptions)
+	if err := rc.Client.List(rc.Ctx, podList, listOptions); err != nil {
+		return nil, err
+	}
+
+	return PodPtrsFromPodList(podList), nil
 }
 
 func (rc *ReconciliationContext) CheckRollingRestart() result.ReconcileResult {
@@ -2425,21 +2428,40 @@ func (rc *ReconciliationContext) fixMissingPVC() (bool, error) {
 	return false, nil
 }
 
+func (rc *ReconciliationContext) datacenterPods() []*corev1.Pod {
+	if rc.dcPods != nil {
+		return rc.dcPods
+	}
+
+	dcSelector := rc.Datacenter.GetDatacenterLabels()
+	dcPods := FilterPodListByLabels(rc.clusterPods, dcSelector)
+
+	if rc.Datacenter.Status.MetadataVersion < 1 && rc.Datacenter.Status.DatacenterName != nil && *rc.Datacenter.Status.DatacenterName == rc.Datacenter.Spec.DatacenterName {
+		rc.ReqLogger.Info("Fetching datacenter pods with the old metadata version labels")
+		dcSelector[api.DatacenterLabel] = api.CleanLabelValue(rc.Datacenter.Spec.DatacenterName)
+		dcPods = append(dcPods, FilterPodListByLabels(rc.clusterPods, dcSelector)...)
+	}
+
+	return dcPods
+}
+
+func (rc *ReconciliationContext) rackPods(rackName string) []*corev1.Pod {
+	return FilterPodListByLabels(rc.datacenterPods(), map[string]string{api.RackLabel: rackName})
+}
+
 // ReconcileAllRacks determines if a rack needs to be reconciled.
 func (rc *ReconciliationContext) ReconcileAllRacks() (reconcile.Result, error) {
 	rc.ReqLogger.Info("reconciliationContext::reconcileAllRacks")
 
 	logger := rc.ReqLogger
 
-	podList, err := rc.listPods(rc.Datacenter.GetClusterLabels())
+	pods, err := rc.listPods(rc.Datacenter.GetClusterLabels())
 	if err != nil {
 		logger.Error(err, "error listing all pods in the cluster")
 	}
 
-	rc.clusterPods = PodPtrsFromPodList(podList)
-
-	dcSelector := rc.Datacenter.GetDatacenterLabels()
-	rc.dcPods = FilterPodListByLabels(rc.clusterPods, dcSelector)
+	rc.clusterPods = pods
+	rc.dcPods = rc.datacenterPods()
 
 	endpointData := rc.getCassMetadataEndpoints()
diff --git a/pkg/reconciliation/reconcile_racks_test.go b/pkg/reconciliation/reconcile_racks_test.go
index bb53bd67..89cc2eee 100644
--- a/pkg/reconciliation/reconcile_racks_test.go
+++ b/pkg/reconciliation/reconcile_racks_test.go
@@ -6,7 +6,6 @@ package reconciliation
 import (
 	"context"
 	"fmt"
-	"github.com/pkg/errors"
 	"io"
 	"net/http"
 	"reflect"
@@ -16,6 +15,8 @@
 	"testing"
 	"time"
 
+	"github.com/pkg/errors"
+
 	"k8s.io/utils/ptr"
 
 	api "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1"
@@ -2780,3 +2781,96 @@ func TestDatacenterStatus(t *testing.T) {
 	assert.NoError(err)
 	assert.Equal(float64(0), val)
 }
+
+func TestDatacenterPods(t *testing.T) {
+	rc, _, cleanupMockScr := setupTest()
+	defer cleanupMockScr()
+	assert := assert.New(t)
+
+	desiredStatefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
+		"default",
+		rc.Datacenter,
+		3)
+	assert.NoErrorf(err, "error occurred creating statefulset")
+
+	desiredStatefulSet.Status.ReadyReplicas = *desiredStatefulSet.Spec.Replicas
+
+	trackObjects := []runtime.Object{
+		desiredStatefulSet,
+		rc.Datacenter,
+	}
+
+	mockPods := mockReadyPodsForStatefulSet(desiredStatefulSet, rc.Datacenter.Spec.ClusterName, rc.Datacenter.Name)
+	for idx := range mockPods {
+		mp := mockPods[idx]
+		trackObjects = append(trackObjects, mp)
+	}
+
+	rc.Client = fake.NewClientBuilder().WithStatusSubresource(rc.Datacenter).WithRuntimeObjects(trackObjects...).Build()
+
+	nextRack := &RackInformation{}
+	nextRack.RackName = "default"
+	nextRack.NodeCount = 1
+	nextRack.SeedCount = 1
+
+	rackInfo := []*RackInformation{nextRack}
+
+	rc.desiredRackInformation = rackInfo
+	rc.statefulSets = make([]*appsv1.StatefulSet, len(rackInfo))
+
+	rc.clusterPods = mockPods
+
+	assert.Equal(int(*desiredStatefulSet.Spec.Replicas), len(rc.datacenterPods()))
+}
+
+func TestDatacenterPodsOldLabels(t *testing.T) {
+	rc, _, cleanupMockScr := setupTest()
+	defer cleanupMockScr()
+	assert := assert.New(t)
+
+	// We fake the process a bit to get old style naming and labels
+	rc.Datacenter.Name = "overrideMe"
+
+	desiredStatefulSet, err := newStatefulSetForCassandraDatacenter(
+		nil,
+		"default",
+		rc.Datacenter,
+		3)
+	assert.NoErrorf(err, "error occurred creating statefulset")
+
+	desiredStatefulSet.Status.ReadyReplicas = *desiredStatefulSet.Spec.Replicas
+
+	trackObjects := []runtime.Object{
+		desiredStatefulSet,
+		rc.Datacenter,
+	}
+
+	mockPods := mockReadyPodsForStatefulSet(desiredStatefulSet, rc.Datacenter.Spec.ClusterName, rc.Datacenter.Name)
+	for idx := range mockPods {
+		mp := mockPods[idx]
+		trackObjects = append(trackObjects, mp)
+	}
+
+	rc.Client = fake.NewClientBuilder().WithStatusSubresource(rc.Datacenter).WithRuntimeObjects(trackObjects...).Build()
+
+	nextRack := &RackInformation{}
+	nextRack.RackName = "default"
+	nextRack.NodeCount = 1
+	nextRack.SeedCount = 1
+
+	rackInfo := []*RackInformation{nextRack}
+
+	rc.desiredRackInformation = rackInfo
+	rc.statefulSets = make([]*appsv1.StatefulSet, len(rackInfo))
+
+	rc.clusterPods = mockPods
+
+	// Let's modify the Datacenter names and set the status like it used to be in some older versions
+	rc.Datacenter.Spec.DatacenterName = "overrideMe"
+	rc.Datacenter.Name = "dc1"
+	rc.Datacenter.Status.DatacenterName = ptr.To[string]("overrideMe")
+	rc.Datacenter.Status.MetadataVersion = 0
+
+	// We should still find the pods
+	assert.Equal(int(*desiredStatefulSet.Spec.Replicas), len(rc.datacenterPods()))
+}
diff --git a/scripts/release-helm-chart.sh b/scripts/release-helm-chart.sh
index 4a37b775..d3db2030 100755
--- a/scripts/release-helm-chart.sh
+++ b/scripts/release-helm-chart.sh
@@ -6,7 +6,7 @@ if [[ ! $0 == scripts/* ]]; then
 fi
 
 # This script assumes k8ssandra is checked out at ../k8ssandra and is checked out at main
-if [ "$#" -le 1 ]; then
+if [ "$#" -lt 1 ]; then
   echo "Usage: scripts/release-helm-chart.sh version legacy"
   echo "Script assumes you are in the correct branch / tag and that k8ssandra repository"
   echo "has been checked out to ../k8ssandra/. If legacy is set, the script will generate"
diff --git a/tests/decommission_dc/decommission_dc_suite_test.go b/tests/decommission_dc/decommission_dc_suite_test.go
index 7aa3c746..39f9a491 100644
--- a/tests/decommission_dc/decommission_dc_suite_test.go
+++ b/tests/decommission_dc/decommission_dc_suite_test.go
@@ -12,22 +12,19 @@ import (
 	"github.com/k8ssandra/cass-operator/tests/kustomize"
 	ginkgo_util "github.com/k8ssandra/cass-operator/tests/util/ginkgo"
 	"github.com/k8ssandra/cass-operator/tests/util/kubectl"
-
-	api "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1"
 )
 
 var (
-	testName        = "Delete DC and verify it is correctly decommissioned in multi-dc cluster"
-	namespace       = "test-decommission-dc"
-	dc1Name         = "dc1"
-	dc1OverrideName = "My_Super_Dc"
-	dc2Name         = "dc2"
-	dc1Yaml         = "../testdata/default-two-rack-two-node-dc.yaml"
-	dc2Yaml         = "../testdata/default-two-rack-two-node-dc2.yaml"
-	dc1Label        = fmt.Sprintf("cassandra.datastax.com/datacenter=%s", api.CleanLabelValue(dc1OverrideName))
-	dc2Label        = fmt.Sprintf("cassandra.datastax.com/datacenter=%s", dc2Name)
-	seedLabel       = "cassandra.datastax.com/seed-node=true"
-	taskYaml        = "../testdata/tasks/rebuild_task.yaml"
+	testName  = "Delete DC and verify it is correctly decommissioned in multi-dc cluster"
+	namespace = "test-decommission-dc"
+	dc1Name   = "dc1"
+	dc2Name   = "dc2"
+	dc1Yaml   = "../testdata/default-two-rack-two-node-dc.yaml"
+	dc2Yaml   = "../testdata/default-two-rack-two-node-dc2.yaml"
+	dc1Label  = fmt.Sprintf("cassandra.datastax.com/datacenter=%s", dc1Name)
+	dc2Label  = fmt.Sprintf("cassandra.datastax.com/datacenter=%s", dc2Name)
+	seedLabel = "cassandra.datastax.com/seed-node=true"
+	taskYaml  = "../testdata/tasks/rebuild_task.yaml"
 	// dcLabel = fmt.Sprintf("cassandra.datastax.com/datacenter=%s", dcName)
 	ns = ginkgo_util.NewWrapper(testName, namespace)
 )
@@ -137,7 +134,7 @@ var _ = Describe(testName, func() {
 			// Wait for the task to be completed
 			ns.WaitForCompleteTask("rebuild-dc")
 
-			podNames := ns.GetDatacenterReadyPodNames(dc1OverrideName)
+			podNames := ns.GetDatacenterReadyPodNames(dc1Name)
 			Expect(len(podNames)).To(Equal(2))
 
 			dcs := findDatacenters(podNames[0])
@@ -160,7 +157,7 @@ var _ = Describe(testName, func() {
 			ns.WaitForOutputAndLog(step, k, "[]", 300)
 
 			// Verify nodetool status has only a single Datacenter
-			podNames = ns.GetDatacenterReadyPodNames(dc1OverrideName)
+			podNames = ns.GetDatacenterReadyPodNames(dc1Name)
 
 			if len(podNames) != 2 {
 				// This is to catch why the test sometimes fails on the check (string parsing? or real issue?)
diff --git a/tests/rolling_restart_with_override/rolling_restart_suite_with_override_test.go b/tests/rolling_restart_with_override/rolling_restart_suite_with_override_test.go
index 99b73826..c0d49cd1 100644
--- a/tests/rolling_restart_with_override/rolling_restart_suite_with_override_test.go
+++ b/tests/rolling_restart_with_override/rolling_restart_suite_with_override_test.go
@@ -20,14 +20,13 @@ import (
 )
 
 var (
-	testName       = "DC override Rolling Restart"
-	namespace      = "test-override-with-rolling-restart"
-	dcName         = "dc1"
-	dcNameOverride = "My_Super_Dc"
-	dcYaml         = "../testdata/default-two-rack-two-node-dc.yaml"
-	taskYaml       = "../testdata/tasks/rolling_restart_override.yaml"
-	dcResource     = fmt.Sprintf("CassandraDatacenter/%s", dcName)
-	ns             = ginkgo_util.NewWrapper(testName, namespace)
+	testName   = "DC override Rolling Restart"
+	namespace  = "test-override-with-rolling-restart"
+	dcName     = "dc1"
+	dcYaml     = "../testdata/default-two-rack-two-node-dc.yaml"
+	taskYaml   = "../testdata/tasks/rolling_restart_override.yaml"
+	dcResource = fmt.Sprintf("CassandraDatacenter/%s", dcName)
+	ns         = ginkgo_util.NewWrapper(testName, namespace)
 )
 
 func TestLifecycle(t *testing.T) {
@@ -86,7 +85,7 @@ var _ = Describe(testName, func() {
 			step = "get ready pods"
 			json = "jsonpath={.items[*].status.containerStatuses[0].ready}"
 			k = kubectl.Get("pods").
-				WithLabel(fmt.Sprintf("cassandra.datastax.com/datacenter=%s", api.CleanLabelValue(dcNameOverride))).
+				WithLabel(fmt.Sprintf("cassandra.datastax.com/datacenter=%s", api.CleanLabelValue(dcName))).
 				WithFlag("field-selector", "status.phase=Running").
 				FormatOutput(json)
 
@@ -105,7 +104,7 @@ var _ = Describe(testName, func() {
 			// Verify each pod does have the annotation..
 			json := `jsonpath={.items[0].metadata.annotations.control\.k8ssandra\.io/restartedAt}`
 			k = kubectl.Get("pods").
-				WithLabel(fmt.Sprintf("cassandra.datastax.com/datacenter=%s", api.CleanLabelValue(dcNameOverride))).
+				WithLabel(fmt.Sprintf("cassandra.datastax.com/datacenter=%s", api.CleanLabelValue(dcName))).
 				WithFlag("field-selector", "status.phase=Running").
 				FormatOutput(json)
 
 			ns.WaitForOutputPatternAndLog(step, k, `^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z$`, 360)
diff --git a/tests/test_all_the_things/test_all_the_things_suite_test.go b/tests/test_all_the_things/test_all_the_things_suite_test.go
index ef0ccac4..98c3243a 100644
--- a/tests/test_all_the_things/test_all_the_things_suite_test.go
+++ b/tests/test_all_the_things/test_all_the_things_suite_test.go
@@ -22,14 +22,13 @@ import (
 )
 
 var (
-	testName       = "Test all the things"
-	namespace      = "test-test-all-the-things"
-	dcName         = "dc1"
-	dcNameOverride = "My_Super_Dc"
-	dcYaml         = "../testdata/default-two-rack-two-node-dc.yaml"
-	dcResource     = fmt.Sprintf("CassandraDatacenter/%s", dcName)
-	dcLabel        = fmt.Sprintf("cassandra.datastax.com/datacenter=%s", api.CleanLabelValue(dcNameOverride))
-	ns             = ginkgo_util.NewWrapper(testName, namespace)
+	testName   = "Test all the things"
+	namespace  = "test-test-all-the-things"
+	dcName     = "dc1"
+	dcYaml     = "../testdata/default-two-rack-two-node-dc.yaml"
+	dcResource = fmt.Sprintf("CassandraDatacenter/%s", dcName)
+	dcLabel    = fmt.Sprintf("cassandra.datastax.com/datacenter=%s", api.CleanLabelValue(dcName))
+	ns         = ginkgo_util.NewWrapper(testName, namespace)
 )
 
 func TestLifecycle(t *testing.T) {
@@ -89,12 +88,12 @@ var _ = Describe(testName, func() {
 			ns.WaitForDatacenterOperatorProgress(dcName, "Updating", 60)
 			ns.WaitForDatacenterConditionWithTimeout(dcName, "ScalingUp", string(corev1.ConditionFalse), 1200)
 			// Ensure that when 'ScaleUp' becomes 'false' that our pods are in fact up and running
-			Expect(len(ns.GetDatacenterReadyPodNames(api.CleanLabelValue(dcNameOverride)))).To(Equal(4))
+			Expect(len(ns.GetDatacenterReadyPodNames(api.CleanLabelValue(dcName)))).To(Equal(4))
 
 			ns.ExpectDoneReconciling(dcName)
 			ns.WaitForDatacenterReady(dcName)
 
-			ns.ExpectDatacenterNameStatusUpdated(dcName, dcNameOverride)
+			ns.ExpectDatacenterNameStatusUpdated(dcName, dcName)
 
 			// Ensure we have a single CassandraTask created which is a cleanup (and it succeeded)
 			ns.WaitForCompletedCassandraTasks(dcName, "cleanup", 1)
@@ -114,7 +113,7 @@ var _ = Describe(testName, func() {
 				FormatOutput(json)
 			ns.WaitForOutputAndLog(step, k, "4", 20)
 
-			ns.WaitForDatacenterToHaveNoPods(api.CleanLabelValue(dcNameOverride))
+			ns.WaitForDatacenterToHaveNoPods(api.CleanLabelValue(dcName))
 
 			step = "resume the dc"
 			json = "{\"spec\": {\"stopped\": false}}"
@@ -133,7 +132,7 @@ var _ = Describe(testName, func() {
 			wg.Add(1)
 			go func() {
 				k = kubectl.Logs("-f").
-					WithLabel(fmt.Sprintf("statefulset.kubernetes.io/pod-name=cluster1-%s-r1-sts-0", api.CleanupForKubernetes(dcNameOverride))).
+					WithLabel(fmt.Sprintf("statefulset.kubernetes.io/pod-name=cluster1-%s-r1-sts-0", dcName)).
 					WithFlag("container", "cassandra")
 				output, err := ns.Output(k)
 				Expect(err).ToNot(HaveOccurred())
@@ -148,7 +147,7 @@ var _ = Describe(testName, func() {
 
 			found, err := regexp.MatchString("node/drain status=200 OK", logOutput)
 			if err == nil && !found {
-				ns.Log(fmt.Sprintf("logOutput, pod: statefulset.kubernetes.io/pod-name=cluster1-%s-r1-sts-0 => %s", api.CleanLabelValue(dcNameOverride), logOutput))
+				ns.Log(fmt.Sprintf("logOutput, pod: statefulset.kubernetes.io/pod-name=cluster1-%s-r1-sts-0 => %s", dcName, logOutput))
 			}
 			if err != nil {
 				ns.Log(fmt.Sprintf("Regexp parsing failed: %v", err))
diff --git a/tests/testdata/default-three-rack-three-node-dc-4x.yaml b/tests/testdata/default-three-rack-three-node-dc-4x.yaml
index 68e0ec79..bada916b 100644
--- a/tests/testdata/default-three-rack-three-node-dc-4x.yaml
+++ b/tests/testdata/default-three-rack-three-node-dc-4x.yaml
@@ -5,7 +5,8 @@ metadata:
 spec:
   clusterName: cluster1
   serverType: cassandra
-  serverVersion: 4.1.4
+  datacenterName: My_Super_Dc
+  serverVersion: 4.1.6
   managementApiAuth:
     insecure: {}
   size: 3
diff --git a/tests/testdata/image_config_parsing.yaml b/tests/testdata/image_config_parsing.yaml
index 1f8caf26..5f6a382f 100644
--- a/tests/testdata/image_config_parsing.yaml
+++ b/tests/testdata/image_config_parsing.yaml
@@ -18,7 +18,7 @@ defaults:
   # Note, postfix is ignored if repository is not set
   cassandra:
     repository: "k8ssandra/cass-management-api"
-    suffix: "-ubi8"
+    suffix: "-ubi"
   dse:
     repository: "datastax/dse-mgmtapi-6_8"
     suffix: "-ubi8"
diff --git a/tests/upgrade_operator/upgrade_operator_suite_test.go b/tests/upgrade_operator/upgrade_operator_suite_test.go
index 6a8eafd4..126ded2a 100644
--- a/tests/upgrade_operator/upgrade_operator_suite_test.go
+++ b/tests/upgrade_operator/upgrade_operator_suite_test.go
@@ -20,6 +20,7 @@ var (
 	testName   = "Upgrade Operator"
 	namespace  = "test-upgrade-operator"
 	dcName     = "dc1"
+	podId      = "pod/cluster1-my-super-dc-r1-sts-0"
 	dcYaml     = "../testdata/default-three-rack-three-node-dc-4x.yaml"
 	dcResource = fmt.Sprintf("CassandraDatacenter/%s", dcName)
 	dcLabel    = fmt.Sprintf("cassandra.datastax.com/datacenter=%s", dcName)
@@ -79,11 +80,11 @@ var _ = Describe(testName, func() {
 			k := kubectl.ApplyFiles(dcYaml)
 			ns.ExecAndLog(step, k)
 
-			ns.WaitForDatacenterReady(dcName)
+			ns.WaitForDatacenterOperatorProgress(dcName, "Ready", 1800)
 
 			// Get UID of the cluster pod
 			step = "get Cassandra pods UID"
-			k = kubectl.Get("pod/cluster1-dc1-r1-sts-0").FormatOutput("jsonpath={.metadata.uid}")
+			k = kubectl.Get(podId).FormatOutput("jsonpath={.metadata.uid}")
 			createdPodUID := ns.OutputAndLog(step, k)
 
 			step = "get name of 1.19.1 operator pod"
@@ -106,7 +107,7 @@ var _ = Describe(testName, func() {
 
 			// Verify Pod hasn't restarted
 			step = "get Cassandra pods UID"
-			k = kubectl.Get("pod/cluster1-dc1-r1-sts-0").FormatOutput("jsonpath={.metadata.uid}")
+			k = kubectl.Get(podId).FormatOutput("jsonpath={.metadata.uid}")
 			postUpgradeCassPodUID := ns.OutputAndLog(step, k)
 
 			Expect(createdPodUID).To(Equal(postUpgradeCassPodUID))
@@ -120,7 +121,7 @@ var _ = Describe(testName, func() {
 			// Get current system-logger image
 			// Verify the Pod now has updated system-logger container image
 			step = "get Cassandra pod system-logger"
-			k = kubectl.Get("pod/cluster1-dc1-r1-sts-0").FormatOutput("jsonpath={.spec.containers[?(@.name == 'server-system-logger')].image}")
+			k = kubectl.Get(podId).FormatOutput("jsonpath={.spec.containers[?(@.name == 'server-system-logger')].image}")
 			loggerImage := ns.OutputAndLog(step, k)
 			Expect(loggerImage).To(Equal("cr.k8ssandra.io/k8ssandra/system-logger:v1.19.1"))
 
@@ -137,14 +138,14 @@ var _ = Describe(testName, func() {
 
 			// Verify pod has been restarted
 			step = "get Cassandra pods UID"
-			k = kubectl.Get("pod/cluster1-dc1-r1-sts-0").FormatOutput("jsonpath={.metadata.uid}")
+			k = kubectl.Get(podId).FormatOutput("jsonpath={.metadata.uid}")
 			postAllowUpgradeUID := ns.OutputAndLog(step, k)
 			Expect(postUpgradeCassPodUID).ToNot(Equal(postAllowUpgradeUID))
 
 			// Verify the Pod now has updated system-logger container image
 			step = "get Cassandra pod system-logger"
-			k = kubectl.Get("pod/cluster1-dc1-r1-sts-0").FormatOutput("jsonpath={.spec.containers[?(@.name == 'server-system-logger')].image}")
+			k = kubectl.Get(podId).FormatOutput("jsonpath={.spec.containers[?(@.name == 'server-system-logger')].image}")
 			loggerImageNew := ns.OutputAndLog(step, k)
 			Expect(loggerImage).To(Not(Equal(loggerImageNew)))
diff --git a/tests/util/ginkgo/lib.go b/tests/util/ginkgo/lib.go
index a05e5d86..aed205d9 100644
--- a/tests/util/ginkgo/lib.go
+++ b/tests/util/ginkgo/lib.go
@@ -6,7 +6,6 @@ package ginkgo_util
 import (
 	"encoding/base64"
 	"fmt"
-	"github.com/pkg/errors"
 	"os"
 	"path/filepath"
 	"regexp"
@@ -15,6 +14,8 @@
 	"strings"
 	"time"
 
+	"github.com/pkg/errors"
+
 	ginkgo "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	"gopkg.in/yaml.v2"
@@ -424,16 +425,6 @@ func (ns *NsWrapper) Log(step string) {
 	ginkgo.By(step)
 }
 
-func (ns *NsWrapper) getDcNameWithOverride(dcName string) string {
-	json := "jsonpath={.spec.datacenterName}"
-	k := kubectl.Get("CassandraDatacenter", dcName).FormatOutput(json)
-	dcNameOverride := ns.OutputPanic(k)
-	if dcNameOverride == "" {
-		return dcName
-	}
-	return dcNameOverride
-}
-
 func (ns *NsWrapper) WaitForDatacenterReadyWithTimeouts(dcName string, podCountTimeout int, dcReadyTimeout int) {
 	json := "jsonpath={.spec.size}"
 	k := kubectl.Get("CassandraDatacenter", dcName).FormatOutput(json)
@@ -441,7 +432,7 @@ func (ns *NsWrapper) WaitForDatacenterReadyWithTimeouts(dcName string, podCountT
 	size, err := strconv.Atoi(sizeString)
 	Expect(err).ToNot(HaveOccurred())
 
-	ns.WaitForDatacenterReadyPodCountWithTimeout(ns.getDcNameWithOverride(dcName), size, podCountTimeout)
+	ns.WaitForDatacenterReadyPodCountWithTimeout(dcName, size, podCountTimeout)
 	ns.WaitForDatacenterOperatorProgress(dcName, "Ready", dcReadyTimeout)
 }