chore(ci): fix k8s jobs, better shebang (#2074)
* Fix k8s tests

Update aks.sh

Fix

Fix configmap

More fixes

* Fix bash

* Re-enable tekton pipelines

* droute only in CI env.

* Better shebang

* Indentation and whitespace

* AKS stop only in CI

* Check AKS cluster state before starting

* Better Tekton pipelines

* Fix dynamic homepage for k8s

* Add all required variables AKS GKE

* Use k8s rather than AKS/GKE

* Make test cluster-agnostic

* Fix tekton pipelines check

* Move condition inside droute
zdrapela authored Dec 19, 2024
1 parent f5def5a commit f7c76d7
Showing 14 changed files with 91 additions and 46 deletions.
16 changes: 14 additions & 2 deletions .ibm/pipelines/cluster/aks/az.sh
@@ -1,3 +1,5 @@
#!/bin/bash

az_login() {
az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID
az account set --subscription $ARM_SUBSCRIPTION_ID
@@ -6,7 +8,16 @@ az_login() {
az_aks_start() {
local name=$1
local resource_group=$2
az aks start --name $name --resource-group $resource_group

local power_state
power_state=$(az aks show --name=$name --resource-group $resource_group --query 'powerState.code' -o tsv)

if [ "$power_state" == "Running" ]; then
echo "AKS cluster is running."
else
echo "AKS cluster is not running (Current state: $power_state). Starting the cluster."
az aks start --name $name --resource-group $resource_group
fi
}

az_aks_stop() {
@@ -19,7 +30,8 @@ az_aks_approuting_enable() {
local name=$1
local resource_group=$2
set +xe
local output=$(az aks approuting enable --name $name --resource-group $resource_group 2>&1 | sed 's/^ERROR: //')
local output
output=$(az aks approuting enable --name $name --resource-group $resource_group 2>&1 | sed 's/^ERROR: //')
set -e
exit_status=$?

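For context, a minimal sketch of how the guarded start is invoked from the nightly AKS job. The actual call site sits in a collapsed part of jobs/aks.sh, so the invocation below is an assumption based on the variable names used elsewhere in this commit:

    # Assumed call site; cluster name and resource group come from mounted CI secrets.
    az_login
    az_aks_start "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}"
    # With the new power-state check, az_aks_start is a no-op when the cluster
    # already reports "Running", so repeated nightly runs no longer trip over a
    # cluster that is already up.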
12 changes: 6 additions & 6 deletions .ibm/pipelines/cluster/aks/deployment.sh
@@ -1,11 +1,12 @@
#!/bin/bash

initiate_aks_deployment() {
add_helm_repos
delete_namespace "${NAME_SPACE_RBAC_K8S}"
configure_namespace "${NAME_SPACE_K8S}"
# Renable when namespace termination issue is solved
# install_tekton_pipelines
install_tekton_pipelines
uninstall_helmchart "${NAME_SPACE_K8S}" "${RELEASE_NAME}"
cd "${DIR}"
cd "${DIR}" || exit
local rhdh_base_url="https://${K8S_CLUSTER_ROUTER_BASE}"
apply_yaml_files "${DIR}" "${NAME_SPACE_K8S}" "${rhdh_base_url}"
yq_merge_value_files "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" "${DIR}/value_files/${HELM_CHART_AKS_DIFF_VALUE_FILE_NAME}" "/tmp/${HELM_CHART_K8S_MERGED_VALUE_FILE_NAME}"
@@ -23,10 +24,9 @@ initiate_rbac_aks_deployment() {
add_helm_repos
delete_namespace "${NAME_SPACE_K8S}"
configure_namespace "${NAME_SPACE_RBAC_K8S}"
# Renable when namespace termination issue is solved
# install_tekton_pipelines
install_tekton_pipelines
uninstall_helmchart "${NAME_SPACE_RBAC_K8S}" "${RELEASE_NAME_RBAC}"
cd "${DIR}"
cd "${DIR}" || exit
local rbac_rhdh_base_url="https://${K8S_CLUSTER_ROUTER_BASE}"
apply_yaml_files "${DIR}" "${NAME_SPACE_RBAC_K8S}" "${rbac_rhdh_base_url}"
yq_merge_value_files "${DIR}/value_files/${HELM_CHART_RBAC_VALUE_FILE_NAME}" "${DIR}/value_files/${HELM_CHART_RBAC_AKS_DIFF_VALUE_FILE_NAME}" "/tmp/${HELM_CHART_RBAC_K8S_MERGED_VALUE_FILE_NAME}"
12 changes: 6 additions & 6 deletions .ibm/pipelines/cluster/gke/deployment.sh
@@ -1,12 +1,13 @@
#!/bin/bash

initiate_gke_deployment() {
gcloud_ssl_cert_create $GKE_CERT_NAME $GKE_INSTANCE_DOMAIN_NAME $GOOGLE_CLOUD_PROJECT
add_helm_repos
delete_namespace "${NAME_SPACE_RBAC_K8S}"
configure_namespace "${NAME_SPACE_K8S}"
# Renable when namespace termination issue is solved
# install_tekton_pipelines
install_tekton_pipelines
uninstall_helmchart "${NAME_SPACE_K8S}" "${RELEASE_NAME}"
cd "${DIR}"
cd "${DIR}" || exit
local rhdh_base_url="https://${K8S_CLUSTER_ROUTER_BASE}"
apply_yaml_files "${DIR}" "${NAME_SPACE_K8S}" "${rhdh_base_url}"
oc apply -f "${DIR}/cluster/gke/frontend-config.yaml" --namespace="${project}"
@@ -27,10 +28,9 @@ initiate_rbac_gke_deployment() {
add_helm_repos
delete_namespace "${NAME_SPACE_K8S}"
configure_namespace "${NAME_SPACE_RBAC_K8S}"
# Renable when namespace termination issue is solved
# install_tekton_pipelines
install_tekton_pipelines
uninstall_helmchart "${NAME_SPACE_RBAC_K8S}" "${RELEASE_NAME_RBAC}"
cd "${DIR}"
cd "${DIR}" || exit
local rbac_rhdh_base_url="https://${K8S_CLUSTER_ROUTER_BASE}"
apply_yaml_files "${DIR}" "${NAME_SPACE_RBAC_K8S}" "${rbac_rhdh_base_url}"
yq_merge_value_files "${DIR}/value_files/${HELM_CHART_RBAC_VALUE_FILE_NAME}" "${DIR}/value_files/${HELM_CHART_RBAC_GKE_DIFF_VALUE_FILE_NAME}" "/tmp/${HELM_CHART_RBAC_K8S_MERGED_VALUE_FILE_NAME}"
5 changes: 4 additions & 1 deletion .ibm/pipelines/cluster/gke/gcloud.sh
@@ -1,3 +1,5 @@
#!/bin/bash

gcloud_auth() {
local service_account_name=$1
local service_account_key_location=$2
@@ -18,7 +20,8 @@ gcloud_ssl_cert_create() {

# Capture both stdout and stderr
set +xe
local output=$(gcloud compute ssl-certificates create "${cert_name}" --domains="${domain}" --project="${project}" --global 2>&1)
local output
output=$(gcloud compute ssl-certificates create "${cert_name}" --domains="${domain}" --project="${project}" --global 2>&1)
set -e

# Check the return status
2 changes: 2 additions & 0 deletions .ibm/pipelines/cluster/osd-gcp/create-osd.sh
@@ -1,3 +1,5 @@
#!/bin/bash

export OC_URL=https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-client-linux.tar.gz
export OI_URL=https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-install-linux.tar.gz

2 changes: 2 additions & 0 deletions .ibm/pipelines/cluster/osd-gcp/destroy-osd.sh
@@ -1,3 +1,5 @@
#!/bin/bash

export OC_URL=https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-client-linux.tar.gz
export OI_URL=https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-install-linux.tar.gz

13 changes: 10 additions & 3 deletions .ibm/pipelines/jobs/aks.sh
@@ -1,11 +1,14 @@
#!/bin/sh
#!/bin/bash

handle_aks() {
echo "Starting AKS deployment"
for file in ${DIR}/cluster/aks/*.sh; do source $file; done

export K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_AKS_CLUSTER_URL)
export K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_AKS_CLUSTER_TOKEN)
export K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/AKS_CLUSTER_TOKEN)
export K8S_CLUSTER_TOKEN_ENCODED=$(printf "%s" $K8S_CLUSTER_TOKEN | base64 | tr -d '\n')
export K8S_SERVICE_ACCOUNT_TOKEN=$K8S_CLUSTER_TOKEN_ENCODED
export OCM_CLUSTER_TOKEN=$K8S_CLUSTER_TOKEN_ENCODED

export K8S_CLUSTER_ROUTER_BASE=$AKS_INSTANCE_DOMAIN_NAME
export NAME_SPACE_K8S="showcase-k8s-ci-nightly"
export NAME_SPACE_RBAC_K8S="showcase-rbac-k8s-ci-nightly"
@@ -17,6 +20,10 @@ handle_aks() {
az_aks_approuting_enable "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}"
az_aks_get_credentials "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}"

export K8S_CLUSTER_URL=$(oc whoami --show-server)
export K8S_CLUSTER_API_SERVER_URL=$(printf "%s" "$K8S_CLUSTER_URL" | base64 | tr -d '\n')
export OCM_CLUSTER_URL=$(printf "%s" "$K8S_CLUSTER_URL" | base64 | tr -d '\n')

initiate_aks_deployment
check_and_test "${RELEASE_NAME}" "${NAME_SPACE_K8S}" "${url}"
delete_namespace "${NAME_SPACE_K8S}"
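A note on the token and URL handling added above (the GKE job below gets the same treatment): GNU base64 wraps its output at 76 columns and appends a newline, so piping through tr -d '\n' keeps the encoded value on a single line, which is what the downstream Helm values and app-config expect. A small, self-contained illustration with placeholder values:

    # Encode a secret for single-line use and verify the round trip.
    RAW_TOKEN="example-token-value"                            # placeholder, not a real secret
    ENCODED=$(printf "%s" "$RAW_TOKEN" | base64 | tr -d '\n')
    printf "%s" "$ENCODED" | base64 -d                         # prints: example-token-value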
12 changes: 11 additions & 1 deletion .ibm/pipelines/jobs/gke.sh
@@ -1,17 +1,27 @@
#!/bin/sh
#!/bin/bash

handle_gke() {
echo "Starting GKE deployment"
for file in ${DIR}/cluster/gke/*.sh; do source $file; done

export K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/GKE_CLUSTER_TOKEN)
export K8S_CLUSTER_TOKEN_ENCODED=$(printf "%s" $K8S_CLUSTER_TOKEN | base64 | tr -d '\n')
export K8S_SERVICE_ACCOUNT_TOKEN=$K8S_CLUSTER_TOKEN_ENCODED
export OCM_CLUSTER_TOKEN=$K8S_CLUSTER_TOKEN_ENCODED

export K8S_CLUSTER_ROUTER_BASE=$GKE_INSTANCE_DOMAIN_NAME
export NAME_SPACE_K8S="showcase-k8s-ci-nightly"
export NAME_SPACE_RBAC_K8S="showcase-rbac-k8s-ci-nightly"

url="https://${K8S_CLUSTER_ROUTER_BASE}"

gcloud_auth "${GKE_SERVICE_ACCOUNT_NAME}" "/tmp/secrets/GKE_SERVICE_ACCOUNT_KEY"
gcloud_gke_get_credentials "${GKE_CLUSTER_NAME}" "${GKE_CLUSTER_REGION}" "${GOOGLE_CLOUD_PROJECT}"

export K8S_CLUSTER_URL=$(oc whoami --show-server)
export K8S_CLUSTER_API_SERVER_URL=$(printf "%s" "$K8S_CLUSTER_URL" | base64 | tr -d '\n')
export OCM_CLUSTER_URL=$(printf "%s" "$K8S_CLUSTER_URL" | base64 | tr -d '\n')

initiate_gke_deployment
check_and_test "${RELEASE_NAME}" "${NAME_SPACE_K8S}" "${url}"
delete_namespace "${NAME_SPACE_K8S}"
2 changes: 1 addition & 1 deletion .ibm/pipelines/jobs/main.sh
@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash

handle_main() {
echo "Configuring namespace: ${NAME_SPACE}"
2 changes: 1 addition & 1 deletion .ibm/pipelines/jobs/operator.sh
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash

install_rhdh_operator() {
local dir=$1
2 changes: 1 addition & 1 deletion .ibm/pipelines/jobs/periodic.sh
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash

handle_nightly() {
oc_login
7 changes: 4 additions & 3 deletions .ibm/pipelines/openshift-ci-tests.sh
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash

set -e
export PS4='[$(date "+%Y-%m-%d %H:%M:%S")] ' # logs timestamp for every cmd.
@@ -7,9 +7,10 @@ LOGFILE="test-log"
export DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
OVERALL_RESULT=0

# shellcheck disable=SC2317
cleanup() {
echo "Cleaning up before exiting"
if [[ "$JOB_NAME" == *aks* ]]; then
if [[ "$JOB_NAME" == *aks* && "${OPENSHIFT_CI}" == "true" ]]; then
az_aks_stop "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}"
fi
rm -rf ~/tmpbin
@@ -44,7 +45,7 @@ main() {
;;
*gke*)
echo "Calling handle_gke"
handle_gke
handle_gke
;;
*operator*)
echo "Calling Operator"
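The cleanup change above narrows cluster shutdown to CI runs: az_aks_stop now fires only when the job name matches *aks* and OPENSHIFT_CI is "true", so running the script locally against the shared nightly cluster no longer powers it off. The same environment check guards droute_send in utils.sh below. A minimal sketch of the pattern, assuming the function is registered as an exit hook (the trap call itself is in a collapsed part of the script):

    # CI-only cleanup hook; OPENSHIFT_CI is assumed to be exported by the CI system.
    cleanup() {
      if [[ "$JOB_NAME" == *aks* && "${OPENSHIFT_CI}" == "true" ]]; then
        az_aks_stop "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}"
      fi
    }
    trap cleanup EXIT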
47 changes: 27 additions & 20 deletions .ibm/pipelines/utils.sh
@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash

retrieve_pod_logs() {
local pod_name=$1; local container=$2; local namespace=$3
@@ -36,6 +36,7 @@ save_all_pod_logs(){
}

droute_send() {
if [[ "${OPENSHIFT_CI}" != "true" ]]; then return 0; fi
temp_kubeconfig=$(mktemp) # Create temporary KUBECONFIG to open second `oc` session
( # Open subshell
if [ -n "${PULL_NUMBER:-}" ]; then
@@ -223,7 +224,7 @@ wait_for_deployment() {
local is_ready=$(oc get pod "$pod_name" -n "$namespace" -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}')
# Verify pod is both Ready and Running
if [[ "$is_ready" == "True" ]] && \
oc get pod "$pod_name" -n "$namespace" | grep -q "Running"; then
oc get pod "$pod_name" -n "$namespace" | grep -q "Running"; then
echo "Pod '$pod_name' is running and ready"
return 0
else
@@ -334,7 +335,7 @@ configure_namespace() {
echo "Error: Failed to create namespace ${project}" >&2
exit 1
fi
if ! oc config set-context --current --namespace="${project}"; then
if ! oc config set-context --current --namespace="${project}"; then
echo "Error: Failed to set context for namespace ${project}" >&2
exit 1
fi
@@ -433,11 +434,11 @@ apply_yaml_files() {
create_app_config_map_k8s "$config_file" "$project"
else
create_app_config_map "$config_file" "$project"
oc create configmap dynamic-homepage-and-sidebar-config \
fi
oc create configmap dynamic-homepage-and-sidebar-config \
--from-file="dynamic-homepage-and-sidebar-config.yaml"="$dir/resources/config_map/dynamic-homepage-and-sidebar-config.yaml" \
--namespace="${project}" \
--dry-run=client -o yaml | oc apply -f -
fi
oc create configmap rbac-policy \
--from-file="rbac-policy.csv"="$dir/resources/config_map/rbac-policy.csv" \
--namespace="$project" \
Expand All @@ -462,7 +463,7 @@ deploy_test_backstage_provider() {
else
echo "BuildConfig for test-backstage-customization-provider already exists in ${project}. Skipping new-app creation."
fi

echo "Exposing service for test-backstage-customization-provider"
oc expose svc/test-backstage-customization-provider --namespace="${project}"
}
@@ -501,7 +502,7 @@ create_app_config_map_k8s() {
local config_file=$1
local project=$2

echo "Creating app-config ConfigMap for AKS/GKE in namespace ${project}"
echo "Creating k8s-specific app-config ConfigMap in namespace ${project}"

yq 'del(.backend.cache)' "$config_file" \
| oc create configmap app-config-rhdh \
@@ -549,8 +550,6 @@ run_tests() {
cp -a "/tmp/${LOGFILE}.html" "${ARTIFACT_DIR}/${project}"
cp -a /tmp/backstage-showcase/e2e-tests/playwright-report/* "${ARTIFACT_DIR}/${project}"

droute_send "${release_name}" "${project}"

echo "${project} RESULT: ${RESULT}"
if [ "${RESULT}" -ne 0 ]; then
OVERALL_RESULT=1
@@ -589,17 +588,6 @@ check_backstage_running() {
return 1
}

install_tekton_pipelines() {
local dir=$1

if oc get pods -n "tekton-pipelines" | grep -q "tekton-pipelines"; then
echo "Tekton Pipelines are already installed."
else
echo "Tekton Pipelines is not installed. Installing..."
oc apply --filename https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml
fi
}

# installs the advanced-cluster-management Operator
install_acm_operator(){
oc apply -f "${DIR}/cluster/operators/acm/operator-group.yaml"
@@ -637,6 +625,25 @@ install_pipelines_operator() {
fi
}

# Installs the Tekton Pipelines if not already installed (alternative of OpenShift Pipelines for Kubernetes clusters)
install_tekton_pipelines() {
DISPLAY_NAME="tekton-pipelines-webhook"
if oc get pods -n "tekton-pipelines" | grep -q "${DISPLAY_NAME}"; then
echo "Tekton Pipelines are already installed."
else
echo "Tekton Pipelines is not installed. Installing..."
oc apply --filename https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml
wait_for_deployment "tekton-pipelines" "${DISPLAY_NAME}"
timeout 300 bash -c '
while ! oc get svc tekton-pipelines-webhook -n tekton-pipelines &> /dev/null; do
echo "Waiting for tekton-pipelines-webhook service to be created..."
sleep 5
done
echo "Service tekton-pipelines-webhook is created."
' || echo "Error: Timed out waiting for tekton-pipelines-webhook service creation."
fi
}
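The reworked install_tekton_pipelines gates readiness on the tekton-pipelines-webhook deployment and its service rather than on any pod in the namespace. If the wait ever times out in CI, a quick manual check along these lines (a debugging sketch, not part of the pipeline) shows where the install stalled:

    # Inspect the Tekton Pipelines install state by hand.
    oc get pods -n tekton-pipelines
    oc get deployment tekton-pipelines-webhook -n tekton-pipelines
    oc get svc tekton-pipelines-webhook -n tekton-pipelines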

cluster_setup() {
install_pipelines_operator
install_acm_operator
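One more note on the apply_yaml_files hunk above: the dynamic-homepage-and-sidebar-config ConfigMap creation was moved out of the else branch so it now runs for both the k8s and OpenShift paths. The oc create ... --dry-run=client -o yaml | oc apply -f - idiom keeps the step idempotent: the manifest is rendered client-side and then applied, so re-runs update the ConfigMap instead of failing with "already exists". A generic sketch of the idiom with illustrative names:

    # Create-or-update a ConfigMap from a file without failing on re-runs.
    oc create configmap example-config \
      --from-file=config.yaml=./config.yaml \
      --namespace=example-ns \
      --dry-run=client -o yaml | oc apply -f -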
@@ -28,7 +28,8 @@ test.describe("Test Customized Quick Access and tech-radar plugin", () => {
await uiHelper.verifyHeading("Company Radar");

await techRadar.verifyRadarDetails("Languages", "JavaScript");
await techRadar.verifyRadarDetails("Storage", "AWS S3");
// TODO: This is cluster-dependent and we need tests cluster-agnostic, remove if not needed
// await techRadar.verifyRadarDetails("Storage", "AWS S3");
await techRadar.verifyRadarDetails("Frameworks", "React");
await techRadar.verifyRadarDetails("Infrastructure", "GitHub Actions");
});