diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml
index fe2fa81c..7cb9e77a 100644
--- a/.github/workflows/build_test.yml
+++ b/.github/workflows/build_test.yml
@@ -1,6 +1,6 @@
 name: CI
 on:
-  pull_request_target:
+  pull_request:
     types:
       - labeled
      - opened
@@ -23,29 +23,14 @@ env:
 jobs:
   build:
     concurrency:
-      group: build-${{ github.head_ref || github.run_id }}
+      group: ${{ github.head_ref || github.run_id }}
       cancel-in-progress: true
     name: Build and Unit Test
     runs-on: ubuntu-latest
-    outputs:
-      version: ${{ steps.vars.outputs.version }}
-      clustername: ${{ steps.vars.outputs.clustername }}
-      pr: ${{ steps.pr.outputs.result }}
     steps:
-      - name: Get PR ref
-        uses: actions/github-script@v7
-        id: pr
-        with:
-          script: |
-            const { data: pullRequest } = await github.rest.pulls.get({
-              ...context.repo,
-              pull_number: context.payload.pull_request.number,
-            });
-            return pullRequest
       - name: Checkout repository
         uses: actions/checkout@v4
         with:
-          ref: ${{fromJSON(steps.pr.outputs.result).merge_commit_sha}}
           fetch-depth: 0
       - name: Setup Go
         uses: actions/setup-go@v5
@@ -59,6 +44,31 @@ jobs:
       - name: Unit tests
         run: |
           make test
+      - name: Build HMC controller image
+        run: |
+          make docker-build
+
+  push:
+    concurrency:
+      group: push-${{ github.head_ref || github.run_id }}
+      cancel-in-progress: true
+    name: E2E Push Images and Charts to GHCR
+    runs-on: ubuntu-latest
+    needs: build
+    if: ${{ contains( github.event.pull_request.labels.*.name, 'test e2e') }}
+    outputs:
+      version: ${{ steps.vars.outputs.version }}
+      clustername: ${{ steps.vars.outputs.clustername }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
+          cache: false
       - name: Set up Buildx
         uses: docker/setup-buildx-action@v3
       - name: Login to GHCR
@@ -73,7 +83,8 @@ jobs:
           GIT_VERSION=$(git describe --tags --always)
           echo "version=${GIT_VERSION:1}" >> $GITHUB_OUTPUT
           echo "clustername=ci-$(date +%s | cut -b6-10)" >> $GITHUB_OUTPUT
-      - name: Build and push HMC controller image
+      - name: Push HMC Controller Image to GHCR
+        if:
         uses: docker/build-push-action@v6
         with:
           build-args: |
@@ -85,36 +96,36 @@ jobs:
           push: true
           cache-from: type=gha
           cache-to: type=gha,mode=max
-      - name: Prepare and push HMC template charts
+      - name: Prepare and push HMC template charts to GHCR
         run: |
+          make set-hmc-version
           make hmc-chart-release
           make helm-push
 
   controller-e2etest:
     name: E2E Controller
     runs-on: ubuntu-latest
-    needs: build
+    if: ${{ contains( github.event.pull_request.labels.*.name, 'test e2e') }}
+    needs: push
     concurrency:
       group: controller-${{ github.head_ref || github.run_id }}
       cancel-in-progress: true
     outputs:
-      clustername: ${{ needs.build.outputs.clustername }}
-      version: ${{ needs.build.outputs.version }}
-      pr: ${{ needs.build.outputs.pr }}
+      clustername: ${{ needs.push.outputs.clustername }}
+      version: ${{ needs.push.outputs.version }}
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
-          ref: ${{fromJSON(needs.build.outputs.pr).merge_commit_sha}}
       - name: Setup kubectl
         uses: azure/setup-kubectl@v4
       - name: Run E2E tests
         env:
           GINKGO_LABEL_FILTER: 'controller'
-          MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }}
-          IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}'
-          VERSION: ${{ needs.build.outputs.version }}
+          MANAGED_CLUSTER_NAME: ${{ needs.push.outputs.clustername }}
+          IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.push.outputs.version }}'
+          VERSION: ${{ needs.push.outputs.version }}
         run: |
           make test-e2e
       - name: Archive test results
@@ -129,14 +140,13 @@ jobs:
     name: E2E Cloud Providers
     runs-on: ubuntu-latest
     if: ${{ contains( github.event.pull_request.labels.*.name, 'test e2e') }}
-    needs: build
+    needs: push
     concurrency:
       group: cloud-${{ github.head_ref || github.run_id }}
       cancel-in-progress: true
     outputs:
-      clustername: ${{ needs.build.outputs.clustername }}
-      version: ${{ needs.build.outputs.version }}
-      pr: ${{ needs.build.outputs.pr }}
+      clustername: ${{ needs.push.outputs.clustername }}
+      version: ${{ needs.push.outputs.version }}
     env:
       AWS_REGION: us-west-2
       AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_ACCESS_KEY_ID }}
@@ -151,7 +161,6 @@ jobs:
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
-          ref: ${{fromJSON(needs.build.outputs.pr).merge_commit_sha}}
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
@@ -162,9 +171,9 @@ jobs:
       - name: Run E2E tests
         env:
           GINKGO_LABEL_FILTER: 'provider:cloud'
-          MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }}
-          IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}'
-          VERSION: ${{ needs.build.outputs.version }}
+          MANAGED_CLUSTER_NAME: ${{ needs.push.outputs.clustername }}
+          IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.push.outputs.version }}'
+          VERSION: ${{ needs.push.outputs.version }}
         run: |
           make test-e2e
       - name: Archive test results
@@ -179,14 +188,13 @@ jobs:
     name: E2E On-Prem Providers
     runs-on: self-hosted
     if: ${{ contains( github.event.pull_request.labels.*.name, 'test e2e') }}
-    needs: build
+    needs: push
     concurrency:
       group: onprem-${{ github.head_ref || github.run_id }}
       cancel-in-progress: true
     outputs:
-      clustername: ${{ needs.build.outputs.clustername }}
-      version: ${{ needs.build.outputs.version }}
-      pr: ${{ needs.build.outputs.pr }}
+      clustername: ${{ needs.push.outputs.clustername }}
+      version: ${{ needs.push.outputs.version }}
     env:
       VSPHERE_USER: ${{ secrets.CI_VSPHERE_USER }}
       VSPHERE_PASSWORD: ${{ secrets.CI_VSPHERE_PASSWORD }}
@@ -205,7 +213,6 @@ jobs:
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
-          ref: ${{fromJSON(needs.build.outputs.pr).merge_commit_sha}}
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
@@ -215,9 +222,9 @@ jobs:
       - name: Run E2E tests
         env:
           GINKGO_LABEL_FILTER: 'provider:onprem'
-          MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }}
-          IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}'
-          VERSION: ${{ needs.build.outputs.version }}
+          MANAGED_CLUSTER_NAME: ${{ needs.push.outputs.clustername }}
+          IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.push.outputs.version }}'
+          VERSION: ${{ needs.push.outputs.version }}
         run: |
           make test-e2e
       - name: Archive test results
@@ -229,23 +236,21 @@
           test/e2e/*.log
 
   cleanup:
-    name: Cleanup
+    name: E2E Cleanup
     needs:
-      - build
+      - push
       - provider-cloud-e2etest
     runs-on: ubuntu-latest
-    if: ${{ always() && !contains(needs.provider-cloud-e2etest.result, 'skipped') && contains(needs.build.result, 'success') }}
+    if: ${{ always() && !contains(needs.provider-cloud-e2etest.result, 'skipped') && contains(needs.push.result, 'success') }}
     timeout-minutes: 15
     outputs:
-      clustername: ${{ needs.build.outputs.clustername }}
-      version: ${{ needs.build.outputs.version }}
-      pr: ${{ needs.build.outputs.pr }}
+      clustername: ${{ needs.push.outputs.clustername }}
+      version: ${{ needs.push.outputs.version }}
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
-          ref: ${{fromJSON(needs.build.outputs.pr).merge_commit_sha}}
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
@@ -260,7 +265,7 @@ jobs:
           AZURE_TENANT_ID: ${{ secrets.CI_AZURE_TENANT_ID }}
           AZURE_CLIENT_ID: ${{ secrets.CI_AZURE_CLIENT_ID }}
           AZURE_CLIENT_SECRET: ${{ secrets.CI_AZURE_CLIENT_SECRET }}
-          CLUSTER_NAME: '${{ needs.build.outputs.clustername }}'
+          CLUSTER_NAME: '${{ needs.push.outputs.clustername }}'
         run: |
           make dev-aws-nuke
-          make dev-azure-nuke
+          make dev-azure-nuke
\ No newline at end of file
diff --git a/templates/provider/hmc/templates/rbac/controller/roles.yaml b/templates/provider/hmc/templates/rbac/controller/roles.yaml
index d37c4f8f..799a10a6 100644
--- a/templates/provider/hmc/templates/rbac/controller/roles.yaml
+++ b/templates/provider/hmc/templates/rbac/controller/roles.yaml
@@ -191,6 +191,11 @@ rules:
   - clusterprofiles
   - clustersummaries
   verbs: {{ include "rbac.editorVerbs" . | nindent 4 }}
+- apiGroups:
+  - controlplane.cluster.x-k8s.io
+  resources:
+  - awsmanagedcontrolplanes
+  verbs: {{ include "rbac.viewerVerbs" . | nindent 4 }}
 - apiGroups:
   - hmc.mirantis.com
   resources:
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index e76a4c24..c9e74b84 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -53,7 +53,8 @@ var _ = BeforeSuite(func() {
 	_, err := utils.Run(cmd)
 	Expect(err).NotTo(HaveOccurred())
 	cmd = exec.Command("make", "test-apply")
-	_, err = utils.Run(cmd)
+	output, err := utils.Run(cmd)
+	_, _ = fmt.Fprintln(GinkgoWriter, string(output))
 	Expect(err).NotTo(HaveOccurred())
 
 	By("validating that the hmc-controller and CAPI provider controllers are running and ready")
@@ -217,8 +218,10 @@ func collectLogArtifacts(kc *kubeclient.KubeClient, clusterName string, provider
 		"describe", "cluster", clusterName, "--namespace", internalutils.DefaultSystemNamespace, "--show-conditions=all")
 	output, err := utils.Run(cmd)
 	if err != nil {
-		utils.WarnError(fmt.Errorf("failed to get clusterctl log: %w", err))
-		return
+		if !strings.Contains(err.Error(), "unable to verify clusterctl version") {
+			utils.WarnError(fmt.Errorf("failed to get clusterctl log: %w", err))
+			return
+		}
 	}
 	err = os.WriteFile(filepath.Join("test/e2e", host+"-"+"clusterctl.log"), output, 0o644)
 	if err != nil {
diff --git a/test/e2e/kubeclient/kubeclient.go b/test/e2e/kubeclient/kubeclient.go
index e3801e4e..285f1b27 100644
--- a/test/e2e/kubeclient/kubeclient.go
+++ b/test/e2e/kubeclient/kubeclient.go
@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"time"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -36,6 +37,11 @@ import (
 	"github.com/Mirantis/hmc/internal/utils/status"
 )
 
+const (
+	numOfRetries        = 10
+	initialRetryTimeout = time.Second * 1
+)
+
 type KubeClient struct {
 	Client         kubernetes.Interface
 	ExtendedClient apiextensionsclientset.Interface
@@ -194,9 +200,20 @@ func (kc *KubeClient) CreateManagedCluster(
 		Resource: "managedclusters",
 	}, true)
 
-	_, err := client.Create(ctx, managedcluster, metav1.CreateOptions{})
-	if !apierrors.IsAlreadyExists(err) {
-		Expect(err).NotTo(HaveOccurred(), "failed to create %s", kind)
+	i := 0
+	timeout := initialRetryTimeout
+	for i < numOfRetries {
+		_, err := client.Create(ctx, managedcluster, metav1.CreateOptions{})
+		if err == nil || apierrors.IsAlreadyExists(err) {
+			break
+		}
+		i++
+		if i == numOfRetries {
+			Expect(err).NotTo(HaveOccurred(), "failed to create %s after %d retries", kind, numOfRetries)
+		}
+		_, _ = fmt.Fprintf(GinkgoWriter, "Create ManagedCluster, attempt #%d failed, retrying after %d seconds...\n", i, timeout)
+		time.Sleep(timeout)
+		timeout *= 2
 	}
 
 	return func() error {
@@ -279,3 +296,15 @@ func (kc *KubeClient) ListK0sControlPlanes(
 		Resource: "k0scontrolplanes",
 	}, clusterName)
 }
+
+func (kc *KubeClient) ListAWSManagedControlPlanes(
+	ctx context.Context, clusterName string,
+) ([]unstructured.Unstructured, error) {
+	GinkgoHelper()
+
+	return kc.listResource(ctx, schema.GroupVersionResource{
+		Group:    "controlplane.cluster.x-k8s.io",
+		Version:  "v1beta2",
+		Resource: "awsmanagedcontrolplanes",
+	}, clusterName)
+}
diff --git a/test/e2e/managedcluster/managedcluster.go b/test/e2e/managedcluster/managedcluster.go
index 72efa96f..e8be90e1 100644
--- a/test/e2e/managedcluster/managedcluster.go
+++ b/test/e2e/managedcluster/managedcluster.go
@@ -50,6 +50,7 @@ const (
 	TemplateAzureStandaloneCP   Template = "azure-standalone-cp"
 	TemplateVSphereStandaloneCP Template = "vsphere-standalone-cp"
 	TemplateVSphereHostedCP     Template = "vsphere-hosted-cp"
+	TemplateEKSCP               Template = "aws-eks-cp"
 )
 
 //go:embed resources/aws-standalone-cp.yaml.tpl
@@ -70,6 +71,9 @@ var vsphereStandaloneCPManagedClusterTemplateBytes []byte
 //go:embed resources/vsphere-hosted-cp.yaml.tpl
 var vsphereHostedCPManagedClusterTemplateBytes []byte
 
+//go:embed resources/aws-eks-cp.yaml.tpl
+var eksCPManagedClusterTemplateBytes []byte
+
 func FilterAllProviders() []string {
 	return []string{
 		utils.HMCControllerLabel,
@@ -134,6 +138,8 @@ func GetUnstructured(templateName Template) *unstructured.Unstructured {
 		managedClusterTemplateBytes = azureHostedCPManagedClusterTemplateBytes
 	case TemplateAzureStandaloneCP:
 		managedClusterTemplateBytes = azureStandaloneCPManagedClusterTemplateBytes
+	case TemplateEKSCP:
+		managedClusterTemplateBytes = eksCPManagedClusterTemplateBytes
 	default:
 		Fail(fmt.Sprintf("Unsupported template: %s", templateName))
 	}
diff --git a/test/e2e/managedcluster/providervalidator.go b/test/e2e/managedcluster/providervalidator.go
index 4df0fa84..3ccb5a07 100644
--- a/test/e2e/managedcluster/providervalidator.go
+++ b/test/e2e/managedcluster/providervalidator.go
@@ -65,14 +65,22 @@ func NewProviderValidator(template Template, clusterName string, action Validati
 		case TemplateAWSStandaloneCP, TemplateAWSHostedCP:
 			resourcesToValidate["ccm"] = validateCCM
 			resourceOrder = append(resourceOrder, "ccm")
+		case TemplateEKSCP:
+			resourcesToValidate["control-planes"] = validateAWSManagedControlPlanes
+			delete(resourcesToValidate, "csi-driver")
 		case TemplateAzureStandaloneCP, TemplateVSphereStandaloneCP:
 			delete(resourcesToValidate, "csi-driver")
 		}
 	} else {
+		validateCPDeletedFunc := validateK0sControlPlanesDeleted
+		if template == TemplateEKSCP {
+			validateCPDeletedFunc = validateAWSManagedControlPlanesDeleted
+		}
+
 		resourcesToValidate = map[string]resourceValidationFunc{
 			"clusters":           validateClusterDeleted,
 			"machinedeployments": validateMachineDeploymentsDeleted,
-			"control-planes":     validateK0sControlPlanesDeleted,
+			"control-planes":     validateCPDeletedFunc,
 		}
 		resourceOrder = []string{"clusters", "machinedeployments", "control-planes"}
 	}
diff --git a/test/e2e/managedcluster/resources/aws-eks-cp.yaml.tpl b/test/e2e/managedcluster/resources/aws-eks-cp.yaml.tpl
new file mode 100644
index 00000000..4c849089
--- /dev/null
+++ b/test/e2e/managedcluster/resources/aws-eks-cp.yaml.tpl
@@ -0,0 +1,17 @@
+apiVersion: hmc.mirantis.com/v1alpha1
+kind: ManagedCluster
+metadata:
+  name: ${MANAGED_CLUSTER_NAME}-eks
+  namespace: ${NAMESPACE}
+spec:
+  template: aws-eks-0-0-2
+  credential: ${AWS_CLUSTER_IDENTITY}-cred
+  config:
+    region: ${AWS_REGION}
+    workersNumber: ${WORKERS_NUMBER:=1}
+    clusterIdentity:
+      name: ${AWS_CLUSTER_IDENTITY}-cred
+      namespace: ${NAMESPACE}
+    publicIP: ${AWS_PUBLIC_IP:=true}
+    worker:
+      instanceType: ${AWS_INSTANCE_TYPE:=t3.small}
diff --git a/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl
index 8a2700c6..070736db 100644
--- a/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl
+++ b/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl
@@ -2,6 +2,7 @@ apiVersion: hmc.mirantis.com/v1alpha1
 kind: ManagedCluster
 metadata:
   name: ${MANAGED_CLUSTER_NAME}
+  namespace: ${NAMESPACE}
 spec:
   template: aws-hosted-cp-0-0-3
   credential: ${AWS_CLUSTER_IDENTITY}-cred
diff --git a/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl
index 24c449bc..33462807 100644
--- a/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl
+++ b/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl
@@ -2,6 +2,7 @@ apiVersion: hmc.mirantis.com/v1alpha1
 kind: ManagedCluster
 metadata:
   name: ${MANAGED_CLUSTER_NAME}
+  namespace: ${NAMESPACE}
 spec:
   template: aws-standalone-cp-0-0-3
   credential: ${AWS_CLUSTER_IDENTITY}-cred
diff --git a/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl
index c0475f3f..1bbf76f1 100644
--- a/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl
+++ b/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl
@@ -2,6 +2,7 @@ apiVersion: hmc.mirantis.com/v1alpha1
 kind: ManagedCluster
 metadata:
   name: ${MANAGED_CLUSTER_NAME}
+  namespace: ${NAMESPACE}
 spec:
   template: vsphere-hosted-cp-0-0-3
   credential: ${VSPHERE_CLUSTER_IDENTITY}-cred
diff --git a/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl
index cc5fa87b..159113f7 100644
--- a/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl
+++ b/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl
@@ -2,6 +2,7 @@ apiVersion: hmc.mirantis.com/v1alpha1
 kind: ManagedCluster
 metadata:
   name: ${MANAGED_CLUSTER_NAME}
+  namespace: ${NAMESPACE}
 spec:
   template: vsphere-standalone-cp-0-0-3
   credential: ${VSPHERE_CLUSTER_IDENTITY}-cred
diff --git a/test/e2e/managedcluster/validate_deleted.go b/test/e2e/managedcluster/validate_deleted.go
index e09d4c25..4272f9fe 100644
--- a/test/e2e/managedcluster/validate_deleted.go
+++ b/test/e2e/managedcluster/validate_deleted.go
@@ -101,3 +101,21 @@ func validateK0sControlPlanesDeleted(ctx context.Context, kc *kubeclient.KubeCli
 
 	return nil
 }
+
+func validateAWSManagedControlPlanesDeleted(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
+	controlPlanes, err := kc.ListAWSManagedControlPlanes(ctx, clusterName)
+	if err != nil && !apierrors.IsNotFound(err) {
+		return err
+	}
+
+	var cpNames []string
+	if len(controlPlanes) > 0 {
+		for _, cp := range controlPlanes {
+			cpNames = append(cpNames, cp.GetName())
+		}
+
+		return fmt.Errorf("AWS Managed control planes still exist: %s", cpNames)
+	}
+
+	return nil
+}
diff --git a/test/e2e/managedcluster/validate_deployed.go b/test/e2e/managedcluster/validate_deployed.go
index bae823f7..7a600ff3 100644
--- a/test/e2e/managedcluster/validate_deployed.go
+++ b/test/e2e/managedcluster/validate_deployed.go
@@ -55,7 +55,7 @@ func validateCluster(ctx context.Context, kc *kubeclient.KubeClient, clusterName
 		Fail(err.Error())
 	}
 
-	return utils.ValidateConditionsTrue(cluster)
+	return utils.NewConditionsValidator().IfTrue(cluster)
 }
 
 func validateMachines(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
@@ -79,7 +79,7 @@ func validateMachines(ctx context.Context, kc *kubeclient.KubeClient, clusterNam
 			Fail(err.Error())
 		}
 
-		if err := utils.ValidateConditionsTrue(&md); err != nil {
+		if err := utils.NewConditionsValidator().IfTrue(&md); err != nil {
 			return err
 		}
 	}
@@ -90,7 +90,7 @@ func validateMachines(ctx context.Context, kc *kubeclient.KubeClient, clusterNam
 			Fail(err.Error())
 		}
 
-		if err := utils.ValidateConditionsTrue(&machine); err != nil {
+		if err := utils.NewConditionsValidator().IfTrue(&machine); err != nil {
 			return err
 		}
 	}
@@ -113,7 +113,7 @@ func validateK0sControlPlanes(ctx context.Context, kc *kubeclient.KubeClient, cl
 
 		// k0s does not use the metav1.Condition type for status.conditions,
 		// instead it uses a custom type so we can't use
-		// ValidateConditionsTrue here, instead we'll check for "ready: true".
+		// ordinary conditions validation here, instead we'll check for "ready: true".
 		objStatus, found, err := unstructured.NestedFieldCopy(controlPlane.Object, "status")
 		if !found {
 			return fmt.Errorf("no status found for %s: %s", objKind, objName)
@@ -139,6 +139,26 @@ func validateK0sControlPlanes(ctx context.Context, kc *kubeclient.KubeClient, cl
 	return nil
 }
 
+func validateAWSManagedControlPlanes(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
+	controlPlanes, err := kc.ListAWSManagedControlPlanes(ctx, clusterName)
+	if err != nil {
+		return err
+	}
+
+	for _, controlPlane := range controlPlanes {
+		if err := utils.ValidateObjectNamePrefix(&controlPlane, clusterName); err != nil {
+			Fail(err.Error())
+		}
+
+		// EKSControlPlaneCreating condition very often has READY=False, SEVERITY=Info and REASON=created (this is fine).
+		if err := utils.NewConditionsValidator(utils.WithExcluded([]string{"EKSControlPlaneCreating"})).IfTrue(&controlPlane); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 // validateCSIDriver validates that the provider CSI driver is functioning
 // by creating a PVC and verifying it enters "Bound" status.
 func validateCSIDriver(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
diff --git a/test/e2e/provider_aws_test.go b/test/e2e/provider_aws_test.go
index 6614698b..4eba91bc 100644
--- a/test/e2e/provider_aws_test.go
+++ b/test/e2e/provider_aws_test.go
@@ -72,7 +72,7 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order
 		}
 	})
 
-	It("should work with an AWS provider", func() {
+	XIt("should work with an AWS provider", func() {
 		// Deploy a standalone cluster and verify it is running/ready.
 		// Deploy standalone with an xlarge instance since it will also be
 		// hosting the hosted cluster.
@@ -186,4 +186,45 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order
 				time.Second).Should(Succeed())
 		*/
 	})
+
+	It("should work with an EKS provider", func() {
+		// Deploy a standalone cluster and verify it is running/ready.
+		GinkgoT().Setenv(managedcluster.EnvVarAWSInstanceType, "t3.small")
+
+		cmd := exec.Command("kubectl", "get", "clustertemplates", "-n", "hmc-system")
+		output, err := utils.Run(cmd)
+		_, _ = fmt.Fprintln(GinkgoWriter, string(output))
+		Expect(err).NotTo(HaveOccurred())
+
+		templateBy(managedcluster.TemplateEKSCP, "creating a ManagedCluster for EKS")
+		sd := managedcluster.GetUnstructured(managedcluster.TemplateEKSCP)
+		clusterName = sd.GetName()
+
+		standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd)
+
+		templateBy(managedcluster.TemplateEKSCP, "waiting for infrastructure to deploy successfully")
+		deploymentValidator := managedcluster.NewProviderValidator(
+			managedcluster.TemplateEKSCP,
+			clusterName,
+			managedcluster.ValidationActionDeploy,
+		)
+
+		Eventually(func() error {
+			return deploymentValidator.Validate(context.Background(), kc)
+		}).WithTimeout(60 * time.Minute).WithPolling(30 * time.Second).Should(Succeed())
+
+		// --- clean up ---
+		templateBy(managedcluster.TemplateEKSCP, "deleting the ManagedCluster for EKS")
+		Expect(standaloneDeleteFunc()).NotTo(HaveOccurred())
+
+		deletionValidator := managedcluster.NewProviderValidator(
+			managedcluster.TemplateEKSCP,
+			clusterName,
+			managedcluster.ValidationActionDelete,
+		)
+		Eventually(func() error {
+			return deletionValidator.Validate(context.Background(), kc)
+		}).WithTimeout(15 * time.Minute).WithPolling(10 *
+			time.Second).Should(Succeed())
+	})
 })
diff --git a/test/e2e/provider_azure_test.go b/test/e2e/provider_azure_test.go
index fcdbe27c..9e2b76ab 100644
--- a/test/e2e/provider_azure_test.go
+++ b/test/e2e/provider_azure_test.go
@@ -32,7 +32,7 @@ import (
 	"github.com/Mirantis/hmc/test/utils"
 )
 
-var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Ordered, func() {
+var _ = XContext("Azure Templates", Label("provider:cloud", "provider:azure"), Ordered, func() {
 	var (
 		kc               *kubeclient.KubeClient
 		standaloneClient *kubeclient.KubeClient
@@ -77,7 +77,7 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or
 		}
 	})
 
-	It("should work with an Azure provider", func() {
+	XIt("should work with an Azure provider", func() {
 		templateBy(managedcluster.TemplateAzureStandaloneCP, "creating a ManagedCluster")
 		sd := managedcluster.GetUnstructured(managedcluster.TemplateAzureStandaloneCP)
 		sdName = sd.GetName()
diff --git a/test/e2e/provider_vsphere_test.go b/test/e2e/provider_vsphere_test.go
index 202d7ded..251d4c18 100644
--- a/test/e2e/provider_vsphere_test.go
+++ b/test/e2e/provider_vsphere_test.go
@@ -29,7 +29,7 @@ import (
 	"github.com/Mirantis/hmc/test/e2e/managedcluster/vsphere"
 )
 
-var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere"), Ordered, func() {
+var _ = XContext("vSphere Templates", Label("provider:onprem", "provider:vsphere"), Ordered, func() {
 	var (
 		kc          *kubeclient.KubeClient
 		deleteFunc  func() error
@@ -78,7 +78,7 @@ var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere"
 		}
 	})
 
-	It("should deploy standalone managed cluster", func() {
+	XIt("should deploy standalone managed cluster", func() {
 		By("creating a managed cluster")
 		d := managedcluster.GetUnstructured(managedcluster.TemplateVSphereStandaloneCP)
 		clusterName = d.GetName()
diff --git a/test/utils/utils.go b/test/utils/utils.go
index 56ce5ee3..79212558 100644
--- a/test/utils/utils.go
+++ b/test/utils/utils.go
@@ -24,6 +24,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/utils/strings/slices"
 
 	"github.com/Mirantis/hmc/internal/utils/status"
 )
@@ -111,10 +112,28 @@ func GetProjectDir() (string, error) {
 	return wd, nil
 }
 
-// ValidateConditionsTrue iterates over the conditions of the given
+type ConditionsValidator struct {
+	excludedConditions []string
+}
+
+func NewConditionsValidator(options ...func(*ConditionsValidator)) *ConditionsValidator {
+	cv := &ConditionsValidator{}
+	for _, o := range options {
+		o(cv)
+	}
+	return cv
+}
+
+func WithExcluded(excludedConditions []string) func(*ConditionsValidator) {
+	return func(cv *ConditionsValidator) {
+		cv.excludedConditions = excludedConditions
+	}
+}
+
+// IfTrue iterates over the conditions of the given
 // unstructured object and returns an error if any of the conditions are not
 // true. Conditions are expected to be of type metav1.Condition.
-func ValidateConditionsTrue(unstrObj *unstructured.Unstructured) error {
+func (cv *ConditionsValidator) IfTrue(unstrObj *unstructured.Unstructured) error {
 	objKind, objName := status.ObjKindName(unstrObj)
 
 	conditions, err := status.ConditionsFromUnstructured(unstrObj)
@@ -129,6 +148,10 @@
 			continue
 		}
 
+		if slices.Contains(cv.excludedConditions, c.Type) {
+			continue
+		}
+
 		errs = errors.Join(errors.New(ConvertConditionsToString(c)), errs)
 	}