From 34b34dcd36eac2c3ad382fa2826740182ed248e2 Mon Sep 17 00:00:00 2001
From: gabriel-farache
Date: Thu, 31 Oct 2024 15:38:47 +0100
Subject: [PATCH] Add CI for all workflow charts

Signed-off-by: gabriel-farache
---
 .github/workflows/create-ocp-project.yaml    | 133 ++++++++++
 .github/workflows/extendable-workflow.yaml   |  93 +++++++
 .github/workflows/greeting.yaml              |  93 +++++++
 .github/workflows/modify-vm-resources.yaml   | 134 ++++++++++
 .github/workflows/move2kube-e2e.yaml         |  90 +++++++
 .github/workflows/mta-v7.x-e2e.yaml          | 126 ++++++++++
 .github/workflows/mtv-migration.yaml         | 121 +++++++++
 .github/workflows/mtv-plan.yaml              | 121 +++++++++
 .github/workflows/request-vm-cnv.yaml        | 132 ++++++++++
 .../00-move2kube-instance-route.yaml         |  11 +
 .../templates/00-move2kube-instance.yaml     |  14 +-
 docs/main/move2kube/README.md                |   4 +-
 docs/main/move2kube/install_m2k.sh           |  57 ++++-
 docs/main/mta-v7.x/README.md                 |   4 +-
 docs/main/mta-v7.x/install-mta-v7.sh         |  38 ++-
 e2e/move2kube.sh                             | 238 ++++++++++++++++++
 16 files changed, 1370 insertions(+), 39 deletions(-)
 create mode 100644 .github/workflows/create-ocp-project.yaml
 create mode 100644 .github/workflows/extendable-workflow.yaml
 create mode 100644 .github/workflows/greeting.yaml
 create mode 100644 .github/workflows/modify-vm-resources.yaml
 create mode 100644 .github/workflows/move2kube-e2e.yaml
 create mode 100644 .github/workflows/mta-v7.x-e2e.yaml
 create mode 100644 .github/workflows/mtv-migration.yaml
 create mode 100644 .github/workflows/mtv-plan.yaml
 create mode 100644 .github/workflows/request-vm-cnv.yaml
 create mode 100644 charts/move2kube/templates/00-move2kube-instance-route.yaml
 create mode 100755 e2e/move2kube.sh

diff --git a/.github/workflows/create-ocp-project.yaml b/.github/workflows/create-ocp-project.yaml
new file mode 100644
index 00000000..13879a18
--- /dev/null
+++ b/.github/workflows/create-ocp-project.yaml
@@ -0,0 +1,133 @@
+name: Create OCP project CI
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/create-ocp-project/**'
+      - .github/workflows/create-ocp-project.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy Create OCP project serverless workflow
+        run: |
+          kubectl patch secret orchestrator-postgresql --patch='{"stringData": { "postgres-username": "postgres" }}'
+          yq --inplace ".spec.persistence = (
+            {
+              \"postgresql\": {
+                \"secretRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"userKey\": \"postgres-username\",
+                  \"passwordKey\": \"postgres-password\"
+                },
+                \"serviceRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"port\": 5432,
+                  \"databaseName\": \"postgres\",
\"postgres\", + \"databaseSchema\": \"create-ocp-project\" + } + } + } + )" charts/create-ocp-project/templates/0?-sonataflow_create-ocp-project.yaml + cd charts + helm install create-ocp-project create-ocp-project + WORKFLOW_NAME=create-ocp-project + kubectl patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{ + "data":{ + "NOTIFICATIONS_BEARER_TOKEN":"'$(kubectl get secret orchestrator-auth -o jsonpath={.data.backend-secret})'" + }, + "stringData":{ + "JIRA_API_TOKEN":"DUMB_TOKEN", + "OCP_API_SERVER_TOKEN":"DUMB_TOKEN" + } + }' + kubectl patch sonataflow create-ocp-project --type merge -p '{ + "spec": { + "podTemplate": { + "container": { + "env": [ + { + "name": "BACKSTAGE_NOTIFICATIONS_URL", + "value": "http://backstage-backstage.rhdh-operator" + }, + { + "name": "JIRA_URL", + "value": "DUMB_VALUE" + }, + { + "name": "JIRA_USERNAME", + "value": "DUMB_VALUE" + }, + { + "name": "OCP_API_SERVER_URL", + "value": "DUMB_VALUE" + }, + { + "name": "OCP_CONSOLE_URL", + "value": "DUMB_VALUE" + } + ] + } + } + } + } + ' + kubectl scale deploy "${WORKFLOW_NAME}" --replicas=0 + kubectl get pod -A + kubectl wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m || kubectl describe pod -l app="${WORKFLOW_NAME}" + + - name: Test workflow is responsive + run: | + kubectl expose deploy create-ocp-project --type="NodePort" --port=8080 --name=create-ocp-project-svc + kubectl port-forward svc/create-ocp-project-svc 8080:8080 & + sleep 3 + status_code=$(curl s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/create-ocp-project' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json') + if [ "$status_code" -ne 200 ]; then + echo "$status_code" + exit 1 + fi + + - name: Export kind Logs + if: always() + run: | + kubectl get pod -A + kubectl get deploy -A + kubectl get sonataflow -A + kind export logs ./kind_logs + + - name: Upload Kind Logs + uses: actions/upload-artifact@v4 + # Always run this, even if one of the previous steps failed. 
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/extendable-workflow.yaml b/.github/workflows/extendable-workflow.yaml
new file mode 100644
index 00000000..e0c7f587
--- /dev/null
+++ b/.github/workflows/extendable-workflow.yaml
@@ -0,0 +1,93 @@
+name: Extendable Workflow CI
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/extendable-workflow/**'
+      - .github/workflows/extendable-workflow.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy Extendable Workflow serverless workflow
+        run: |
+          kubectl patch secret orchestrator-postgresql --patch='{"stringData": { "postgres-username": "postgres" }}'
+          yq --inplace ".spec.persistence = (
+            {
+              \"postgresql\": {
+                \"secretRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"userKey\": \"postgres-username\",
+                  \"passwordKey\": \"postgres-password\"
+                },
+                \"serviceRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"port\": 5432,
+                  \"databaseName\": \"postgres\",
+                  \"databaseSchema\": \"extendable-workflow\"
+                }
+              }
+            }
+          )" charts/extendable-workflow/templates/0?-sonataflow_extendable-workflow.yaml
+          cd charts
+          helm install extendable-workflow extendable-workflow
+          sleep 3
+          WORKFLOW_NAME=extendable-workflow
+          kubectl get pod -A
+          kubectl wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m || kubectl describe pod -l app="${WORKFLOW_NAME}"
+
+      - name: Test workflow is responsive
+        run: |
+          kubectl expose deploy extendable-workflow --type="NodePort" --port=8080 --name=extendable-workflow-svc
+          kubectl port-forward svc/extendable-workflow-svc 8080:8080 &
+          sleep 3
+          status_code=$(curl -s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/extendable-workflow' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json')
+          if [ "$status_code" -ne 200 ]; then
+            echo "$status_code"
+            exit 1
+          fi
+
+      - name: Export kind Logs
+        if: always()
+        run: |
+          kubectl get pod -A
+          kubectl get deploy -A
+          kubectl get sonataflow -A
+          kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/greeting.yaml b/.github/workflows/greeting.yaml
new file mode 100644
index 00000000..d149d6dc
--- /dev/null
+++ b/.github/workflows/greeting.yaml
@@ -0,0 +1,93 @@
+name: Greeting CI
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/greeting/**'
+      - .github/workflows/greeting.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy Greeting serverless workflow
+        run: |
+          kubectl patch secret orchestrator-postgresql --patch='{"stringData": { "postgres-username": "postgres" }}'
+          yq --inplace ".spec.persistence = (
+            {
+              \"postgresql\": {
+                \"secretRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"userKey\": \"postgres-username\",
+                  \"passwordKey\": \"postgres-password\"
+                },
+                \"serviceRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"port\": 5432,
+                  \"databaseName\": \"postgres\",
+                  \"databaseSchema\": \"greeting\"
+                }
+              }
+            }
+          )" charts/greeting/templates/0?-sonataflow_greeting.yaml
+          cd charts
+          helm install greeting greeting
+          sleep 3
+          WORKFLOW_NAME=greeting
+          kubectl get pod -A
+          kubectl wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m || kubectl describe pod -l app="${WORKFLOW_NAME}"
+
+      - name: Test workflow is responsive
+        run: |
+          kubectl expose deploy greeting --type="NodePort" --port=8080 --name=greeting-svc
+          kubectl port-forward svc/greeting-svc 8080:8080 &
+          sleep 3
+          status_code=$(curl -s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/greeting' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json')
+          if [ "$status_code" -ne 200 ]; then
+            echo "$status_code"
+            exit 1
+          fi
+
+      - name: Export kind Logs
+        if: always()
+        run: |
+          kubectl get pod -A
+          kubectl get deploy -A
+          kubectl get sonataflow -A
+          kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/modify-vm-resources.yaml b/.github/workflows/modify-vm-resources.yaml
new file mode 100644
index 00000000..26cc30bd
--- /dev/null
+++ b/.github/workflows/modify-vm-resources.yaml
@@ -0,0 +1,134 @@
+name: Modify VM Resources CI
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/modify-vm-resources/**'
+      - .github/workflows/modify-vm-resources.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy Modify VM Resources serverless workflow
+        run: |
+          kubectl patch secret orchestrator-postgresql --patch='{"stringData": { "postgres-username": "postgres" }}'
+          yq --inplace ".spec.persistence = (
+            {
+              \"postgresql\": {
+                \"secretRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"userKey\": \"postgres-username\",
+                  \"passwordKey\": \"postgres-password\"
+                },
+                \"serviceRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"port\": 5432,
+                  \"databaseName\": \"postgres\",
+                  \"databaseSchema\": \"mta\"
+                }
+              }
+            }
+          )" charts/modify-vm-resources/templates/0?-sonataflow_modify-vm-resources.yaml
+          cd charts
+          helm install modify-vm-resources modify-vm-resources
+          WORKFLOW_NAME=modify-vm-resources
+          kubectl patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{
+            "data":{
+              "NOTIFICATIONS_BEARER_TOKEN":"'$(kubectl get secret orchestrator-auth -o jsonpath={.data.backend-secret})'"
+            },
+            "stringData":{
+              "JIRA_API_TOKEN":"DUMB_TOKEN",
+              "OCP_API_SERVER_TOKEN":"DUMB_TOKEN"
+            }
+          }'
+          kubectl patch sonataflow modify-vm-resources --type merge -p '{
+            "spec": {
+              "podTemplate": {
+                "container": {
+                  "env": [
+                    {
+                      "name": "BACKSTAGE_NOTIFICATIONS_URL",
+                      "value": "http://backstage-backstage.rhdh-operator"
+                    },
+                    {
+                      "name": "JIRA_URL",
+                      "value": "DUMB_VALUE"
+                    },
+                    {
+                      "name": "JIRA_USERNAME",
+                      "value": "DUMB_VALUE"
+                    },
+                    {
+                      "name": "OCP_API_SERVER_URL",
+                      "value": "DUMB_VALUE"
+                    },
+                    {
+                      "name": "OCP_CONSOLE_URL",
+                      "value": "DUMB_VALUE"
+                    }
+                  ]
+                }
+              }
+            }
+          }
+          '
+          kubectl scale deploy "${WORKFLOW_NAME}" --replicas=0
+          sleep 3
+          kubectl get pod -A
+          kubectl wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m || kubectl describe pod -l app="${WORKFLOW_NAME}"
+
+      - name: Test workflow is responsive
+        run: |
+          kubectl expose deploy modify-vm-resources --type="NodePort" --port=8080 --name=modify-vm-resources-svc
+          kubectl port-forward svc/modify-vm-resources-svc 8080:8080 &
+          sleep 3
+          status_code=$(curl -s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/modify-vm-resources' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json')
+          if [ "$status_code" -ne 200 ]; then
+            echo "$status_code"
+            exit 1
+          fi
+
+      - name: Export kind Logs
+        if: always()
+        run: |
+          kubectl get pod -A
+          kubectl get deploy -A
+          kubectl get sonataflow -A
+          kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/move2kube-e2e.yaml b/.github/workflows/move2kube-e2e.yaml
new file mode 100644
index 00000000..8423141a
--- /dev/null
+++ b/.github/workflows/move2kube-e2e.yaml
@@ -0,0 +1,90 @@
+name: Move2kube Workflow end to end tests
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/move2kube/**'
+      - .github/workflows/move2kube-e2e.yaml
+      - e2e/move2kube.sh
+
+jobs:
+  run-m2k-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+      - name: Create sonataflow-infra namespace
+        run: |
+          # The knative events resources need to be in this namespace, as the broker URL is hard-coded at the moment
+          kubectl create ns sonataflow-infra
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy Move2kube serverless workflow and its components
+        env:
+          SSH_PUB_KEY: ${{secrets.SSH_PUB_KEY}}
+          SSH_PRIV_KEY: ${{secrets.SSH_PRIV_KEY}}
+        run: |
+          echo "${SSH_PUB_KEY}" >> id_rsa.pub
+          echo "${SSH_PRIV_KEY}" >> id_rsa
+          # we are not on OCP but on k8s, the route resource does not exist
+          rm -rf charts/move2kube/templates/00-move2kube-instance-route.yaml
+
+          kubectl patch secret orchestrator-postgresql --patch='{"stringData": { "postgres-username": "postgres" }}'
+          yq --inplace ".spec.persistence = (
+            {
+              \"postgresql\": {
+                \"secretRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"userKey\": \"postgres-username\",
+                  \"passwordKey\": \"postgres-password\"
+                },
+                \"serviceRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"port\": 5432,
+                  \"databaseName\": \"postgres\",
+                  \"databaseSchema\": \"m2k\"
+                }
+              }
+            }
+          )" charts/move2kube/templates/0?-sonataflow_m2k.yaml
+
+          K8S_INSTALL=1 PRIV_ID_RSA_PATH=id_rsa PUB_ID_RSA_PATH=id_rsa.pub M2K_HELM_REPO=charts/move2kube/ TARGET_NS=default ./docs/main/move2kube/install_m2k.sh
+
+      - name: Run e2e script
+        run: |
+          export BACKEND_SECRET=$(kubectl get secret orchestrator-auth -o jsonpath={.data.backend-secret} | base64 -d)
+          e2e/move2kube.sh
+
+      - name: Export kind Logs
+        if: always()
+        run: |
+          kubectl get pods
+          kubectl get pods -n sonataflow-infra
+          kubectl get broker
+          kubectl get triggers
+          kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/mta-v7.x-e2e.yaml b/.github/workflows/mta-v7.x-e2e.yaml
new file mode 100644
index 00000000..5b964a6c
--- /dev/null
+++ b/.github/workflows/mta-v7.x-e2e.yaml
@@ -0,0 +1,126 @@
+name: MTA v7.x Workflow end to end tests
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/mta-v7.x/**'
+      - .github/workflows/mta-v7.x-e2e.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Install Konveyor 0.3 (MTA upstream equivalent to 7.0)
+        run: |
+          # install konveyor operator
+          # version 0.2 is MTA 6.2 and 0.3 is 7.x
+          kubectl create -f https://operatorhub.io/install/konveyor-0.3/konveyor-operator.yaml
+          # give the apiserver time
+          echo "sleeping 300 seconds to give time for the operator to pull images and start"
+          sleep 300s
+          kubectl get csv -A
+          # TODO it's a bit smelly that the csv name is hard-coded here.
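+          # Wait for the operator's ClusterServiceVersion to reach the Succeeded phase before creating the Tackle CR.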
+          kubectl wait --for=jsonpath='{.status.phase}=Succeeded' -n my-konveyor-operator csv/konveyor-operator.v0.3.2
+          kubectl get pods -A
+          kubectl wait --for=condition=Ready=true pods -l "name=tackle-operator" -n my-konveyor-operator --timeout=240s
+          kubectl get crds
+          kubectl create -f - << EOF
+          kind: Tackle
+          apiVersion: tackle.konveyor.io/v1alpha1
+          metadata:
+            name: tackle
+            namespace: my-konveyor-operator
+          spec:
+            feature_auth_required: false
+            hub_database_volume_size: 1Gi
+            hub_bucket_volume_size: 1Gi
+          EOF
+
+          kubectl get pods -n my-konveyor-operator
+          sleep 60s
+          kubectl get tackle -n my-konveyor-operator -o yaml
+          echo "wait for tackle ui to be ready"
+          kubectl get pods -n my-konveyor-operator
+          sleep 300s
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=tackle-ui" -n my-konveyor-operator --timeout=240s
+          # now the MTA workflow can execute against tackle-ui.my-konveyor-operator.svc:8080
+
+      - name: Deploy MTA serverless workflow
+        run: |
+          kubectl patch secret orchestrator-postgresql --patch='{"stringData": { "postgres-username": "postgres" }}'
+          yq --inplace ".spec.persistence = (
+            {
+              \"postgresql\": {
+                \"secretRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"userKey\": \"postgres-username\",
+                  \"passwordKey\": \"postgres-password\"
+                },
+                \"serviceRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"port\": 5432,
+                  \"databaseName\": \"postgres\",
+                  \"databaseSchema\": \"mta\"
+                }
+              }
+            }
+          )" charts/mta-v7.x/templates/0?-sonataflow_mta-analysis-v7.yaml
+          # we are on k8s, not OCP, some resources are not available
+          rm -rf charts/mta-v7.x/templates/00-mta-operator.yaml
+          rm -rf charts/mta-v7.x/templates/00-tackle-resources.yaml
+          K8S_INSTALL=1 MTA_HELM_REPO=charts/mta-v7.x/ TARGET_NS=default ./docs/main/mta-v7.x/install-mta-v7.sh
+
+      - name: Test workflow is responsive
+        run: |
+          kubectl expose deploy mta-analysis-v7 --type="NodePort" --port=8080 --name=mta-svc
+          kubectl port-forward svc/mta-svc 8080:8080 &
+          sleep 3
+          status_code=$(curl -s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/mta-analysis-v7' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json')
+          if [ "$status_code" -ne 200 ]; then
+            echo "$status_code"
+            exit 1
+          fi
+
+      - name: Export kind Logs
+        if: always()
+        run: |
+          kubectl get pod -A
+          kubectl get deploy -A
+          kubectl get sonataflow -A
+          kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/mtv-migration.yaml b/.github/workflows/mtv-migration.yaml
new file mode 100644
index 00000000..04b734d5
--- /dev/null
+++ b/.github/workflows/mtv-migration.yaml
@@ -0,0 +1,121 @@
+name: MTV Migration CI
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/mtv-migration/**'
+      - .github/workflows/mtv-migration.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy MTV Migration serverless workflow
+        run: |
+          kubectl patch secret orchestrator-postgresql --patch='{"stringData": { "postgres-username": "postgres" }}'
+          yq --inplace ".spec.persistence = (
+            {
+              \"postgresql\": {
+                \"secretRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"userKey\": \"postgres-username\",
+                  \"passwordKey\": \"postgres-password\"
+                },
+                \"serviceRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"port\": 5432,
+                  \"databaseName\": \"postgres\",
+                  \"databaseSchema\": \"mtv-migration\"
+                }
+              }
+            }
+          )" charts/mtv-migration/templates/0?-sonataflow_mtv-migration.yaml
+          cd charts
+          helm install mtv-migration mtv-migration
+          WORKFLOW_NAME=mtv-migration
+          kubectl patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{
+            "data":{
+              "NOTIFICATIONS_BEARER_TOKEN":"'$(kubectl get secret orchestrator-auth -o jsonpath={.data.backend-secret})'"
+            },
+            "stringData":{
+              "OCP_API_SERVER_TOKEN":"DUMB_TOKEN"
+            }
+          }'
+          kubectl patch sonataflow mtv-migration --type merge -p '{
+            "spec": {
+              "podTemplate": {
+                "container": {
+                  "env": [
+                    {
+                      "name": "BACKSTAGE_NOTIFICATIONS_URL",
+                      "value": "http://backstage-backstage.rhdh-operator"
+                    },
+                    {
+                      "name": "OCP_API_SERVER_URL",
+                      "value": "DUMB_VALUE"
+                    }
+                  ]
+                }
+              }
+            }
+          }
+          '
+          kubectl scale deploy "${WORKFLOW_NAME}" --replicas=0
+          sleep 3
+          kubectl get pod -A
+          kubectl wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m || kubectl describe pod -l app="${WORKFLOW_NAME}"
+
+      - name: Test workflow is responsive
+        run: |
+          kubectl expose deploy mtv-migration --type="NodePort" --port=8080 --name=mtv-migration-svc
+          kubectl port-forward svc/mtv-migration-svc 8080:8080 &
+          sleep 3
+          status_code=$(curl -s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/mtv-migration' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json')
+          if [ "$status_code" -ne 200 ]; then
+            echo "$status_code"
+            exit 1
+          fi
+
+      - name: Export kind Logs
+        if: always()
+        run: |
+          kubectl get pod -A
+          kubectl get deploy -A
+          kubectl get sonataflow -A
+          kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/mtv-plan.yaml b/.github/workflows/mtv-plan.yaml
new file mode 100644
index 00000000..c389699d
--- /dev/null
+++ b/.github/workflows/mtv-plan.yaml
@@ -0,0 +1,121 @@
+name: MTV Plan CI
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/mtv-plan/**'
+      - .github/workflows/mtv-plan.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy MTV Plan serverless workflow
+        run: |
+          kubectl patch secret orchestrator-postgresql --patch='{"stringData": { "postgres-username": "postgres" }}'
+          yq --inplace ".spec.persistence = (
+            {
+              \"postgresql\": {
+                \"secretRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"userKey\": \"postgres-username\",
+                  \"passwordKey\": \"postgres-password\"
+                },
+                \"serviceRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"port\": 5432,
+                  \"databaseName\": \"postgres\",
+                  \"databaseSchema\": \"mtv-plan\"
+                }
+              }
+            }
+          )" charts/mtv-plan/templates/0?-sonataflow_mtv-plan.yaml
+          cd charts
+          helm install mtv-plan mtv-plan
+          WORKFLOW_NAME=mtv-plan
+          kubectl patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{
+            "data":{
+              "NOTIFICATIONS_BEARER_TOKEN":"'$(kubectl get secret orchestrator-auth -o jsonpath={.data.backend-secret})'"
+            },
+            "stringData":{
+              "OCP_API_SERVER_TOKEN":"DUMB_TOKEN"
+            }
+          }'
+          kubectl patch sonataflow mtv-plan --type merge -p '{
+            "spec": {
+              "podTemplate": {
+                "container": {
+                  "env": [
+                    {
+                      "name": "BACKSTAGE_NOTIFICATIONS_URL",
+                      "value": "http://backstage-backstage.rhdh-operator"
+                    },
+                    {
+                      "name": "OCP_API_SERVER_URL",
+                      "value": "DUMB_VALUE"
+                    }
+                  ]
+                }
+              }
+            }
+          }
+          '
+          kubectl scale deploy "${WORKFLOW_NAME}" --replicas=0
+          sleep 3
+          kubectl get pod -A
+          kubectl wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m || kubectl describe pod -l app="${WORKFLOW_NAME}"
+
+      - name: Test workflow is responsive
+        run: |
+          kubectl expose deploy mtv-plan --type="NodePort" --port=8080 --name=mtv-plan-svc
+          kubectl port-forward svc/mtv-plan-svc 8080:8080 &
+          sleep 3
+          status_code=$(curl -s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/mtv-plan' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json')
+          if [ "$status_code" -ne 200 ]; then
+            echo "$status_code"
+            exit 1
+          fi
+
+      - name: Export kind Logs
+        if: always()
+        run: |
+          kubectl get pod -A
+          kubectl get deploy -A
+          kubectl get sonataflow -A
+          kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/request-vm-cnv.yaml b/.github/workflows/request-vm-cnv.yaml
new file mode 100644
index 00000000..fa00b2ac
--- /dev/null
+++ b/.github/workflows/request-vm-cnv.yaml
@@ -0,0 +1,132 @@
+name: Request VM CNV CI
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/request-vm-cnv/**'
+      - .github/workflows/request-vm-cnv.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy Request VM CNV serverless workflow
+        run: |
+          kubectl patch secret orchestrator-postgresql --patch='{"stringData": { "postgres-username": "postgres" }}'
+          yq --inplace ".spec.persistence = (
+            {
+              \"postgresql\": {
+                \"secretRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"userKey\": \"postgres-username\",
+                  \"passwordKey\": \"postgres-password\"
+                },
+                \"serviceRef\": {
+                  \"name\": \"orchestrator-postgresql\",
+                  \"port\": 5432,
+                  \"databaseName\": \"postgres\",
+                  \"databaseSchema\": \"request-vm-cnv\"
+                }
+              }
+            }
+          )" charts/request-vm-cnv/templates/0?-sonataflow_request-vm-cnv.yaml
+          cd charts
+          helm install request-vm-cnv request-vm-cnv
+          WORKFLOW_NAME=request-vm-cnv
+          kubectl patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{
+            "data":{
+              "NOTIFICATIONS_BEARER_TOKEN":"'$(kubectl get secret orchestrator-auth -o jsonpath={.data.backend-secret})'"
+            },
+            "stringData":{
+              "JIRA_API_TOKEN":"DUMB_TOKEN",
+              "OCP_API_SERVER_TOKEN":"DUMB_TOKEN"
+            }
+          }'
+          kubectl patch sonataflow request-vm-cnv --type merge -p '{
+            "spec": {
+              "podTemplate": {
+                "container": {
+                  "env": [
+                    {
+                      "name": "BACKSTAGE_NOTIFICATIONS_URL",
+                      "value": "http://backstage-backstage.rhdh-operator"
+                    },
+                    {
+                      "name": "JIRA_URL",
+                      "value": "DUMB_VALUE"
+                    },
+                    {
+                      "name": "JIRA_USERNAME",
+                      "value": "DUMB_VALUE"
+                    },
+                    {
+                      "name": "OCP_API_SERVER_URL",
+                      "value": "DUMB_VALUE"
+                    },
+                    {
+                      "name": "OCP_CONSOLE_URL",
+                      "value": "DUMB_VALUE"
+                    }
+                  ]
+                }
+              }
+            }
+          }
+          '
+          kubectl scale deploy "${WORKFLOW_NAME}" --replicas=0
+          sleep 3
+          kubectl get pod -A
+          kubectl wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m || kubectl describe pod -l app="${WORKFLOW_NAME}"
+
+      - name: Test workflow is responsive
+        run: |
+          kubectl expose deploy request-vm-cnv --type="NodePort" --port=8080 --name=request-vm-cnv-svc
+          kubectl port-forward svc/request-vm-cnv-svc 8080:8080 &
+          sleep 3
+          status_code=$(curl -s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/request-vm-cnv' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json')
+          if [ "$status_code" -ne 200 ]; then
+            echo "$status_code"
+            exit 1
+          fi
+
+      - name: Export kind Logs
+        if: always()
+        run: |
+          kubectl get pod -A
+          kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/charts/move2kube/templates/00-move2kube-instance-route.yaml b/charts/move2kube/templates/00-move2kube-instance-route.yaml
new file mode 100644
index 00000000..0c73bbf3
--- /dev/null
+++ b/charts/move2kube/templates/00-move2kube-instance-route.yaml
@@ -0,0 +1,11 @@
+apiVersion: route.openshift.io/v1
+kind: Route
+metadata:
+  name: {{ .Values.instance.name }}-route
+  namespace: {{ .Values.instance.namespace }}
+spec:
+  tls:
+    termination: edge
+  to:
+    kind: Service
+    name: {{ .Values.instance.name }}-svc
diff --git a/charts/move2kube/templates/00-move2kube-instance.yaml b/charts/move2kube/templates/00-move2kube-instance.yaml
index 30af1793..b6220bb1 100644
--- a/charts/move2kube/templates/00-move2kube-instance.yaml
+++ b/charts/move2kube/templates/00-move2kube-instance.yaml
@@ -63,16 +63,4 @@ spec:
     - port: 8080
       protocol: TCP
   selector:
-    app: {{ .Values.instance.name }}
----
-apiVersion: route.openshift.io/v1
-kind: Route
-metadata:
-  name: {{ .Values.instance.name }}-route
-  namespace: {{ .Values.instance.namespace }}
-spec:
-  tls:
-    termination: edge
-  to:
-    kind: Service
-    name: {{ .Values.instance.name }}-svc
+    app: {{ .Values.instance.name }}
\ No newline at end of file
diff --git a/docs/main/move2kube/README.md b/docs/main/move2kube/README.md
index ad855d49..b4219789 100644
--- a/docs/main/move2kube/README.md
+++ b/docs/main/move2kube/README.md
@@ -39,13 +39,13 @@ See https://www.parodos.dev/orchestrator-helm-chart/postgresql on how to install
 ## Automated installation
 Run the [installation script](install_m2k.sh):
 ```console
-PRIV_ID_RSA_PATH=${HOME}/.ssh/id_rsa PUB_ID_RSA_PATH=${HOME}/.ssh/id_rsa.pub ./install_m2k.sh
+PRIV_ID_RSA_PATH=${HOME}/.ssh/id_rsa PUB_ID_RSA_PATH=${HOME}/.ssh/id_rsa.pub TARGET_NS=sonataflow-infra ./install_m2k.sh
 ```
 
 You can override the helm repo to use by setting `M2K_HELM_REPO`. By default `orchestrator-workflows/move2kube` is used and the helm repository `orchestrator-workflows` is installed from `https://parodos.dev/serverless-workflows-config`
 To use the local file, set `M2K_HELM_REPO` to `.`:
 ```console
-M2K_HELM_REPO=. PRIV_ID_RSA_PATH=${HOME}/.ssh/id_rsa PUB_ID_RSA_PATH=${HOME}/.ssh/id_rsa.pub ./install_m2k.sh
+M2K_HELM_REPO=. PRIV_ID_RSA_PATH=${HOME}/.ssh/id_rsa PUB_ID_RSA_PATH=${HOME}/.ssh/id_rsa.pub TARGET_NS=sonataflow-infra ./install_m2k.sh
 ```
 ## Manual installation
 ### Prerequisites
diff --git a/docs/main/move2kube/install_m2k.sh b/docs/main/move2kube/install_m2k.sh
index b49e6678..c31f8a00 100755
--- a/docs/main/move2kube/install_m2k.sh
+++ b/docs/main/move2kube/install_m2k.sh
@@ -1,6 +1,11 @@
 #!/bin/bash
 
-CLUSTER_CLIENT=$(which "${CLUSTER_CLIENT}" >/dev/null 2>&1 && echo oc || echo kubectl)
+CLUSTER_CLIENT=$(which oc >/dev/null 2>&1 && echo oc || echo kubectl)
+if [[ "${CLUSTER_CLIENT}" == "oc" ]]; then
+  echo "Deploying on OCP cluster"
+else
+  echo "Deploying on k8s cluster"
+fi
 
 if [[ -z "${PRIV_ID_RSA_PATH}" ]]; then
   echo 'PRIV_ID_RSA_PATH env variable must be set to the path of the private id_rsa file to use. I.e: ${HOME}/.ssh/id_rsa'
@@ -12,15 +17,24 @@ if [[ -z "${PUB_ID_RSA_PATH}" ]]; then
   exit -1
 fi
 
+if [[ -z "${TARGET_NS}" ]]; then
+  echo 'TARGET_NS env variable must be set to the namespace in which the workflow must be installed'
+  exit -1
+fi
+
+if [[ ! -z "${K8S_INSTALL}" ]]; then
+  echo "Running on k8s, adapting the script"
+fi
+
 if [[ -z "${M2K_HELM_REPO}" ]]; then
   M2K_HELM_REPO=orchestrator-workflows/move2kube
   echo "M2K_HELM_REPO not set, using default helm m2k helm repository ${M2K_HELM_REPO}"
   helm repo add orchestrator-workflows https://parodos.dev/serverless-workflows-config
 fi
 
-TARGET_NS=sonataflow-infra
 M2K_INSTANCE_NS=move2kube
 WORKFLOW_NAME=m2k
+
 "${CLUSTER_CLIENT}" patch configmap/config-features \
   -n knative-serving \
   --type merge \
@@ -29,19 +43,38 @@ WORKFLOW_NAME=m2k
 helm install move2kube ${M2K_HELM_REPO} -n ${TARGET_NS} --set instance.namespace=${M2K_INSTANCE_NS}
 if [ $? -ne 0 ]; then
   echo "move2kube chart already installed, run helm delete move2kube -n ${TARGET_NS} to remove it"
-  exit -1
+  exit 1
 fi
 
-"${CLUSTER_CLIENT}" -n ${TARGET_NS} adm policy add-scc-to-user $("${CLUSTER_CLIENT}" -n ${TARGET_NS} get deployments m2k-save-transformation-func-v1-deployment -oyaml | "${CLUSTER_CLIENT}" adm policy scc-subject-review --no-headers -o yaml --filename - | yq -r .status.allowedBy.name) -z default
-"${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} adm policy add-scc-to-user $("${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} get deployments move2kube -oyaml | "${CLUSTER_CLIENT}" adm policy scc-subject-review --no-headers -o yaml --filename - | yq -r .status.allowedBy.name) -z default
+
+if [[ -z "${K8S_INSTALL}" ]]; then
+  "${CLUSTER_CLIENT}" -n ${TARGET_NS} adm policy add-scc-to-user $("${CLUSTER_CLIENT}" -n ${TARGET_NS} get deployments m2k-save-transformation-func-v1-deployment -oyaml | "${CLUSTER_CLIENT}" adm policy scc-subject-review --no-headers -o yaml --filename - | yq -r .status.allowedBy.name) -z default
+  "${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} adm policy add-scc-to-user $("${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} get deployments move2kube -oyaml | "${CLUSTER_CLIENT}" adm policy scc-subject-review --no-headers -o yaml --filename - | yq -r .status.allowedBy.name) -z default
+fi
+
 "${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} create secret generic sshkeys --from-file=id_rsa=${PRIV_ID_RSA_PATH} --from-file=id_rsa.pub=${PUB_ID_RSA_PATH}
 "${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} scale deploy move2kube --replicas=0 && "${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} scale deploy move2kube --replicas=1
-kubectl -n ${M2K_INSTANCE_NS} wait --for=condition=Ready=true --timeout=2m pod -l app=move2kube-instance
-M2K_ROUTE=$("${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} get routes move2kube-route -o yaml | yq -r .spec.host)
+"${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} wait --for=condition=Ready=true --timeout=2m pod -l app=move2kube
+
+if [[ -z "${K8S_INSTALL}" ]]; then
+  M2K_ROUTE="https://"$("${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} get routes move2kube-route -o yaml | yq -r .spec.host)
+else
+  M2K_ROUTE="http://move2kube-svc.${M2K_INSTANCE_NS}.svc.cluster.local:8080"
+fi
+
 "${CLUSTER_CLIENT}" -n ${TARGET_NS} delete ksvc m2k-save-transformation-func
-helm upgrade move2kube ${M2K_HELM_REPO} -n ${TARGET_NS} --set workflow.move2kubeURL=https://${M2K_ROUTE}
+helm upgrade move2kube ${M2K_HELM_REPO} -n ${TARGET_NS} --set workflow.move2kubeURL=${M2K_ROUTE}
 
-"${CLUSTER_CLIENT}" -n ${TARGET_NS} patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{"data": { "NOTIFICATIONS_BEARER_TOKEN": "'$("${CLUSTER_CLIENT}" get secrets -n rhdh-operator backstage-backend-auth-secret -o go-template='{{ .data.BACKEND_SECRET }}')'"}}'
-BACKSTAGE_NOTIFICATIONS_URL=http://backstage-backstage.rhdh-operator
+if [[ ! -z "${K8S_INSTALL}" ]]; then
+  "${CLUSTER_CLIENT}" -n ${TARGET_NS} patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{"data": { "NOTIFICATIONS_BEARER_TOKEN": "'$("${CLUSTER_CLIENT}" get secret orchestrator-auth -o jsonpath={.data.backend-secret})'"}}'
+  BACKSTAGE_NOTIFICATIONS_URL=http://orchestrator-backstage.default.svc.cluster.local:7007
+else
+  "${CLUSTER_CLIENT}" -n ${TARGET_NS} patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{"data": { "NOTIFICATIONS_BEARER_TOKEN": "'$("${CLUSTER_CLIENT}" get secrets -n rhdh-operator backstage-backend-auth-secret -o go-template='{{ .data.BACKEND_SECRET }}')'"}}'
+  BACKSTAGE_NOTIFICATIONS_URL=http://backstage-backstage.rhdh-operator
+fi
 
 BROKER_URL=$("${CLUSTER_CLIENT}" -n ${TARGET_NS} get broker -o yaml | yq -r .items[0].status.address.url)
-"${CLUSTER_CLIENT}" -n ${TARGET_NS} patch sonataflow m2k --type merge -p '{"spec": { "podTemplate": { "container": { "env": [{"name": "BACKSTAGE_NOTIFICATIONS_URL", "value": "'${BACKSTAGE_NOTIFICATIONS_URL}'"},{"name": "K_SINK", "value": "'${BROKER_URL}'"}, {"name": "MOVE2KUBE_URL", "value": "https://'${M2K_ROUTE}'"}]}}}}'
-"${CLUSTER_CLIENT}" -n ${TARGET_NS} wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m
\ No newline at end of file
+"${CLUSTER_CLIENT}" -n ${TARGET_NS} patch sonataflow m2k --type merge -p '{"spec": { "podTemplate": { "container": { "env": [{"name": "BACKSTAGE_NOTIFICATIONS_URL", "value": "'${BACKSTAGE_NOTIFICATIONS_URL}'"},{"name": "K_SINK", "value": "'${BROKER_URL}'"}, {"name": "MOVE2KUBE_URL", "value": "'${M2K_ROUTE}'"}]}}}}'
+"${CLUSTER_CLIENT}" -n ${TARGET_NS} scale deploy m2k --replicas=0
+"${CLUSTER_CLIENT}" -n ${TARGET_NS} get pods
+"${CLUSTER_CLIENT}" -n ${TARGET_NS} describe pods -l app="${WORKFLOW_NAME}"
+"${CLUSTER_CLIENT}" -n ${TARGET_NS} wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=2m
+"${CLUSTER_CLIENT}" -n ${TARGET_NS} describe pods -l app="${WORKFLOW_NAME}"
diff --git a/docs/main/mta-v7.x/README.md b/docs/main/mta-v7.x/README.md
index f10f91e7..9d656469 100644
--- a/docs/main/mta-v7.x/README.md
+++ b/docs/main/mta-v7.x/README.md
@@ -33,13 +33,13 @@ See https://www.parodos.dev/orchestrator-helm-chart/postgresql on how to install
 ## Automated installation
 Run the [installation script](install-mta-v7.sh):
 ```console
-./install-mta-v7.sh
+TARGET_NS=sonataflow-infra ./install-mta-v7.sh
 ```
 
 You can override the helm repo to use by setting `MTA_HELM_REPO`. By default `orchestrator-workflows/mta-v7` is used and the helm repository `orchestrator-workflows` is installed from `https://parodos.dev/serverless-workflows-config`
 To use the local file, set `MTA_HELM_REPO` to `.`:
 ```console
-MTA_HELM_REPO=. ./install-mta-v7.sh
+TARGET_NS=sonataflow-infra MTA_HELM_REPO=. ./install-mta-v7.sh
 ```
 
 ## Manual installation
diff --git a/docs/main/mta-v7.x/install-mta-v7.sh b/docs/main/mta-v7.x/install-mta-v7.sh
index 91f4886a..e7a59ff7 100755
--- a/docs/main/mta-v7.x/install-mta-v7.sh
+++ b/docs/main/mta-v7.x/install-mta-v7.sh
@@ -1,7 +1,9 @@
 #!/bin/bash
 
 CLUSTER_CLIENT=$(which "${CLUSTER_CLIENT}" >/dev/null 2>&1 && echo oc || echo kubectl)
-TARGET_NS=sonataflow-infra
+if [[ ! -z "${K8S_INSTALL}" ]]; then
+  echo "Running on k8s, adapting the script"
+fi
 
 if [[ -z "${MTA_HELM_REPO}" ]]; then
   MTA_HELM_REPO=orchestrator-workflows/mta-v7
@@ -9,14 +11,30 @@ if [[ -z "${MTA_HELM_REPO}" ]]; then
   helm repo add orchestrator-workflows https://parodos.dev/serverless-workflows-config
 fi
 
+if [[ -z "${TARGET_NS}" ]]; then
+  echo 'TARGET_NS env variable must be set to the namespace in which the workflow must be installed'
+  exit -1
+fi
+
 helm install mta ${MTA_HELM_REPO} -n ${TARGET_NS}
 WORKFLOW_NAME=mta-analysis-v7
-"${CLUSTER_CLIENT}" -n ${TARGET_NS} patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{"data": { "NOTIFICATIONS_BEARER_TOKEN": "'$("${CLUSTER_CLIENT}" get secrets -n rhdh-operator backstage-backend-auth-secret -o go-template='{{ .data.BACKEND_SECRET }}')'"}}'
-while [[ $retry_count -lt 5 ]]; do
-  "${CLUSTER_CLIENT}" -n openshift-mta get route mta && break || sleep 60
-  retry_count=$((retry_count + 1))
-done
-MTA_ROUTE=$("${CLUSTER_CLIENT}" -n openshift-mta get route mta -o yaml | yq -r .spec.host)
-BACKSTAGE_NOTIFICATIONS_URL=http://backstage-backstage.rhdh-operator
-"${CLUSTER_CLIENT}" -n "${TARGET_NS}" patch sonataflow mta-analysis-v7 --type merge -p '{"spec": { "podTemplate": { "container": { "env": [{"name": "BACKSTAGE_NOTIFICATIONS_URL", "value": "'${BACKSTAGE_NOTIFICATIONS_URL}'"}, {"name": "MTA_URL", "value": "https://'${MTA_ROUTE}'"}]}}}}'
-"${CLUSTER_CLIENT}" -n "${TARGET_NS}" wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m
\ No newline at end of file
+if [[ -z "${K8S_INSTALL}" ]]; then
+  "${CLUSTER_CLIENT}" -n ${TARGET_NS} patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{"data": { "NOTIFICATIONS_BEARER_TOKEN": "'$("${CLUSTER_CLIENT}" get secrets -n rhdh-operator backstage-backend-auth-secret -o go-template='{{ .data.BACKEND_SECRET }}')'"}}'
+  BACKSTAGE_NOTIFICATIONS_URL=http://backstage-backstage.rhdh-operator
+
+  while [[ $retry_count -lt 5 ]]; do
+    "${CLUSTER_CLIENT}" -n openshift-mta get route mta && break || sleep 60
+    retry_count=$((retry_count + 1))
+  done
+  MTA_ROUTE="https://"$("${CLUSTER_CLIENT}" -n openshift-mta get route mta -o yaml | yq -r .spec.host)
+else
+  "${CLUSTER_CLIENT}" -n ${TARGET_NS} patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{"data": { "NOTIFICATIONS_BEARER_TOKEN": "'$("${CLUSTER_CLIENT}" get secret orchestrator-auth -o jsonpath={.data.backend-secret})'"}}'
+  BACKSTAGE_NOTIFICATIONS_URL=http://orchestrator-backstage.default.svc.cluster.local:7007
+  MTA_ROUTE="http://tackle-ui.my-konveyor-operator.svc.cluster.local:8080"
+fi
+"${CLUSTER_CLIENT}" -n "${TARGET_NS}" patch sonataflow "${WORKFLOW_NAME}" --type merge -p '{"spec": { "podTemplate": { "container": { "env": [{"name": "BACKSTAGE_NOTIFICATIONS_URL", "value": "'${BACKSTAGE_NOTIFICATIONS_URL}'"}, {"name": "MTA_URL", "value": "'${MTA_ROUTE}'"}]}}}}'
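+# Scale the workflow deployment to zero to force a restart; the operator scales it back up, and the pod comes back with the patched env values.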
"'${MTA_ROUTE}'"}]}}}}' +"${CLUSTER_CLIENT}" -n ${TARGET_NS} scale deploy "${WORKFLOW_NAME}" --replicas=0 +"${CLUSTER_CLIENT}" -n ${TARGET_NS} get pods +"${CLUSTER_CLIENT}" -n ${TARGET_NS} describe pods -l app="${WORKFLOW_NAME}" +"${CLUSTER_CLIENT}" -n "${TARGET_NS}" wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=2m +"${CLUSTER_CLIENT}" -n ${TARGET_NS} describe pods -l app="${WORKFLOW_NAME}" diff --git a/e2e/move2kube.sh b/e2e/move2kube.sh new file mode 100755 index 00000000..5f1c46b0 --- /dev/null +++ b/e2e/move2kube.sh @@ -0,0 +1,238 @@ +#!/bin/bash + +set -x +set -e + +# holds the pid of the port forward process for cleanups +export port_forward_pid="" +export M2K_INSTANCE_NS="move2kube" + +function cleanup() { + echo "cleanup $?" + kill "$port_forward_pid" || true + kill "$move2kube_port_forward_pid" || true +} + +function getAllNotifications() { + GUEST_TOKEN=$(curl $BACKSTAGE_URL/api/auth/guest/refresh | jq -r .backstageIdentity.token) + curl -s -H "Authorization: Bearer ${GUEST_TOKEN}" "${BACKSTAGE_NOTIFICATION_URL}" | jq ".notifications" +} + +trap 'cleanup' EXIT SIGTERM + +echo "Proxy Janus-idp port ⏳" +kubectl port-forward "$(kubectl get svc -l app.kubernetes.io/component=backstage -o name)" 9080:7007 & +port_forward_pid="$!" +sleep 3 +echo "Proxy Janus-idp port ✅" + +echo "Proxy move2kube instance port ⏳" +kubectl -n "${M2K_INSTANCE_NS}" port-forward svc/move2kube-svc 8080:8080 & +move2kube_port_forward_pid="$!" +sleep 3 +echo "Proxy move2kube instance port ✅" + + +echo "End to end tests start ⏳" +MOVE2KUBE_URL="http://localhost:8080" +BACKSTAGE_URL="http://localhost:9080" +BACKSTAGE_NOTIFICATION_URL="${BACKSTAGE_URL}/api/notifications/" +GIT_ORG="gfarache31/m2k-test" +GIT_REPO="bitbucket.org/${GIT_ORG}" +GIT_SOURCE_BRANCH="master" +GIT_TARGET_BRANCH="e2e-test-$(date +%s)" +echo "Creating workspace and project in move2kube instance" +WORKSPACE_ID=$(curl -X POST "${MOVE2KUBE_URL}/api/v1/workspaces" -H 'Content-Type: application/json' --data '{"name": "e2e Workspace", "description": "e2e tests"}' | jq -r .id) +PROJECT_ID=$(curl -X POST "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects" -H 'Content-Type: application/json' --data '{"name": "e2e Project", "description": "e2e tests"}' | jq -r .id) + +echo "Wait until M2K workflow is available in backstage..." +M2K_STATUS=$(curl -XGET -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer ${BACKEND_SECRET}" ${BACKSTAGE_URL}/api/orchestrator/v2/workflows/m2k/overview) +until [ "$M2K_STATUS" -eq 200 ] +do +sleep 5 +M2K_STATUS=$(curl -XGET -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer ${BACKEND_SECRET}" ${BACKSTAGE_URL}/api/orchestrator/v2/workflows/m2k/overview) +done + +echo "M2K is available in backstage, sending execution request" +out=$(curl -XPOST -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" \ + ${BACKSTAGE_URL}/api/orchestrator/v2/workflows/m2k/execute \ + -d "{\"inputData\": {\"repositoryURL\": \"ssh://${GIT_REPO}\", \"recipients\": [\"user:default/guest\"], \"sourceBranch\": \"${GIT_SOURCE_BRANCH}\", \"targetBranch\": \"${GIT_TARGET_BRANCH}\", \"workspaceId\": \"${WORKSPACE_ID}\", \"projectId\": \"${PROJECT_ID}\"}}") +ID=$(echo "$out" | jq -r -e .id) + +echo "Workflow ID: ${ID}" + +if [ -z "$ID" ] || [ "$ID" == "null" ]; then + echo "workflow instance id is null... 
exiting " + exit 1 +fi + + +echo "Wait until plan exists" +retries=20 +http_status=$(curl -X GET -s -o /dev/null -w "%{http_code}" "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}/plan") +while [ ${retries} -ne 0 ] && [ "${http_status}" -eq 404 ]; do +echo "Wait until plan exists" + sleep 5 + retries=$((retries-1)) + http_status=$(curl -X GET -s -o /dev/null -w "%{http_code}" "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}/plan") +done + +if [ "${http_status}" -eq 204 ] +then + echo "Plan not created, error when creating it, checks move2kbe logs, http status=${http_status}...exiting " + exit 1 +fi + +if [ "${http_status}" -eq 404 ] +then + echo "Plan not created, http status=${http_status}...exiting " + exit 1 +fi + + +GUEST_TOKEN=$(curl $BACKSTAGE_URL/api/auth/guest/refresh | jq -r .backstageIdentity.token) + +echo "Checking if Q&A waiting notification with move2kube URL received" +retries=20 +while test ${retries} -ne 0 && getAllNotifications | jq -e '.|length == 0' ; do +echo "Wait until a message arrives" + sleep 5 + retries=$((retries-1)) +done + +ALL_NOTIFICATION=$(getAllNotifications) +printf "All notifications\n%s\n" "$ALL_NOTIFICATION" +if printf "%s" "$ALL_NOTIFICATION" | jq -e '.|length == 0' +then + printf "No notification found. The full reply is %s\n\nexiting " "${NOTIFICATION}" + exit 1 +fi + +NOTIFICATION=$(printf "%s" "$ALL_NOTIFICATION" | jq '.[0]') +if printf "%s" "${NOTIFICATION}" | jq ".payload.link | select(contains(\"${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}/outputs\"))" +then + printf "Notification has payload link with matching URL: %s\n\n" "${NOTIFICATION}" +else + printf "Notification has no payload link with matching URL: %s\n\nexiting " "${NOTIFICATION}" + exit 1 +fi + +echo "Checking if Knative function running" +nb_pods=$(kubectl get pods -l app=m2k-save-transformation-func-v1 -no-headers | wc -l) +retries=20 +while [[ ${retries} -ne 0 && ${nb_pods} -eq 0 ]]; do +echo "Wait until Knative function running" + sleep 5 + retries=$((retries-1)) + nb_pods=$(kubectl get pods -l app=m2k-save-transformation-func-v1 --no-headers | wc -l) +done + +if [[ $nb_pods -ne 1 ]] +then + echo "Knative function not running...exiting " + exit 1 +fi + +echo "Answering Q&A to continue workflow" +TRANSFORMATION_ID=$(curl "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}" | jq -r '.outputs | keys'[0]) +current_question=$(curl -X GET "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}/outputs/${TRANSFORMATION_ID}/problems/current") +question_id=$(echo "${current_question}" | jq -r '.question | fromjson | .id' | sed -r -e 's/"/\\\\\\\"/g') +default_answer=$(echo "${current_question}" | jq '.question | fromjson | .default' | sed -r -e 's/"/\\"/g' | tr '\n' ' ') +while [ "${question_id}" != "" ]; do + curl -iX POST "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}/outputs/${TRANSFORMATION_ID}/problems/current/solution" \ + -H 'Content-Type: application/json' \ + -d "{\"solution\": \"{\\\"id\\\":\\\"${question_id}\\\",\\\"answer\\\":${default_answer}}\"}" + current_question=$(curl -X GET "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}/outputs/${TRANSFORMATION_ID}/problems/current") + question_id=$(echo "${current_question}" | jq -r '.question | fromjson | .id' | sed -r -e 's/"/\\\\\\\"/g') + default_answer=$(echo "${current_question}" | jq '.question | fromjson | .default' | sed -r -e 's/"/\\"/g' | tr '\n' ' 
+TRANSFORMATION_ID=$(curl "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}" | jq -r '.outputs | keys'[0])
+current_question=$(curl -X GET "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}/outputs/${TRANSFORMATION_ID}/problems/current")
+question_id=$(echo "${current_question}" | jq -r '.question | fromjson | .id' | sed -r -e 's/"/\\\\\\\"/g')
+default_answer=$(echo "${current_question}" | jq '.question | fromjson | .default' | sed -r -e 's/"/\\"/g' | tr '\n' ' ')
+while [ "${question_id}" != "" ]; do
+  curl -iX POST "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}/outputs/${TRANSFORMATION_ID}/problems/current/solution" \
+    -H 'Content-Type: application/json' \
+    -d "{\"solution\": \"{\\\"id\\\":\\\"${question_id}\\\",\\\"answer\\\":${default_answer}}\"}"
+  current_question=$(curl -X GET "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}/outputs/${TRANSFORMATION_ID}/problems/current")
+  question_id=$(echo "${current_question}" | jq -r '.question | fromjson | .id' | sed -r -e 's/"/\\\\\\\"/g')
+  default_answer=$(echo "${current_question}" | jq '.question | fromjson | .default' | sed -r -e 's/"/\\"/g' | tr '\n' ' ')
+done
+
+echo "Checking if workflow completed successfully"
+
+curl -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" "${BACKSTAGE_URL}/api/orchestrator/v2/workflows/instances/${ID}"
+
+state=$(curl -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" "${BACKSTAGE_URL}/api/orchestrator/v2/workflows/instances/${ID}" | jq -r .instance.state)
+retries=20
+while [[ ${retries} -ne 0 && "$state" != "COMPLETED" ]]; do
+  sleep 5
+  retries=$((retries-1))
+  curl -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" "${BACKSTAGE_URL}/api/orchestrator/v2/workflows/instances/${ID}"
+  state=$(curl -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" "${BACKSTAGE_URL}/api/orchestrator/v2/workflows/instances/${ID}" | jq -r .instance.state)
+done
+
+if [ "$state" != "COMPLETED" ]; then
+  echo "workflow instance state is '${state}', should be 'COMPLETED'... exiting "
+  exit 1
+fi
+
+echo "Checking if branch ${GIT_TARGET_BRANCH} created on git repo ${GIT_REPO}"
+
+http_status=$(curl -X GET -L -s -o /dev/null -w "%{http_code}" "https://api.bitbucket.org/2.0/repositories/${GIT_ORG}/refs/branches/${GIT_TARGET_BRANCH}")
+retries=20
+while [[ ${retries} -ne 0 && ${http_status} -eq 404 ]]; do
+  sleep 5
+  retries=$((retries-1))
+http_status=$(curl -X GET -L -s -o /dev/null -w "%{http_code}" "https://api.bitbucket.org/2.0/repositories/${GIT_ORG}/refs/branches/${GIT_TARGET_BRANCH}")
+done
+if [ "${http_status}" -eq 404 ]
+then
+  echo "Branch ${GIT_TARGET_BRANCH} not created on repo ${GIT_REPO}...exiting "
+  exit 1
+else
+  echo "Branch ${GIT_TARGET_BRANCH} successfully created on repo ${GIT_REPO}! "
+fi
+
+echo "Checking if completion notification received"
+retries=20
+while test ${retries} -ne 0 && getAllNotifications | jq -e '.|length == 1' ; do
+echo "Wait until a message arrives, expecting 2 messages overall"
+  sleep 5
+  retries=$((retries-1))
+done
+
+ALL_NOTIFICATION=$(getAllNotifications)
+printf "All notifications\n%s\n" "$ALL_NOTIFICATION"
+
+if printf "%s" "$ALL_NOTIFICATION" | jq -e '.|length == 1'
+then
+  printf "No notification with result found - expecting success or failure notification. The full reply is %s\n\nexiting " "${ALL_NOTIFICATION}"
+  exit 1
+fi
+
+NOTIFICATION=$(printf "%s" "$ALL_NOTIFICATION" | jq '.[0]')
+if printf "%s" "$NOTIFICATION" | jq -e '.payload| (.severity != "high" and .severity != "critical" )'
+then
+  printf "Notification has NO result with high or critical severity in it: %s\n\n" "${NOTIFICATION}"
+else
+  printf "Notification has result with high or critical severity in it: %s\n\nexiting " "${NOTIFICATION}"
+  exit 1
+fi
+
+
+echo "Checking that, when given wrong input parameters, the workflow ends in error"
+out=$(curl -XPOST -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" \
+  ${BACKSTAGE_URL}/api/orchestrator/v2/workflows/m2k/execute \
+  -d "{\"inputData\": {\"repositoryURL\": \"ssh://${GIT_REPO}_WRONG\", \"recipients\": [\"user:default/guest\"], \"sourceBranch\": \"${GIT_SOURCE_BRANCH}\", \"targetBranch\": \"${GIT_TARGET_BRANCH}\", \"workspaceId\": \"${WORKSPACE_ID}\", \"projectId\": \"${PROJECT_ID}\"}}")
+ID=$(echo "$out" | jq -r -e .id)
+
+echo "Workflow ID: ${ID}"
+
+if [ -z "$ID" ] || [ "$ID" == "null" ]; then
+  echo "workflow instance id is null... exiting "
exiting " + exit 1 +fi + +state=$(curl -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" "${BACKSTAGE_URL}/api/orchestrator/v2/workflows/instances/${ID}" | jq -r .instance.state) +retries=20 +while [[ ${retries} -ne 0 && "$state" != "ERROR" ]]; do + sleep 5 + retries=$((retries-1)) + state=$(curl -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" "${BACKSTAGE_URL}/api/orchestrator/v2/workflows/instances/${ID}" | jq -r .instance.state) +done + +if [ "$state" != "ERROR" ]; then + echo "workflow instance state is '${state}', should be 'ERROR'... exiting " + exit 1 +fi + +echo "End to end tests passed ✅"