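// Jenkinsfile: create/update or destroy an AWS EKS cluster with Terraform, then
// optionally install cluster add-ons (CloudWatch logging/metrics, metrics-server,
// the k8s dashboard, Prometheus, nginx ingress, Cluster Autoscaler and cert-manager)
// with kubectl and helm. Behaviour is driven entirely by the build parameters below.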
pipeline {
parameters {
choice(name: 'action', choices: 'create\ndestroy', description: 'Create/update or destroy the EKS cluster.')
string(name: 'cluster', defaultValue : 'demo', description: "EKS cluster name.")
choice(name: 'k8s_version', choices: '1.21\n1.20\n1.19\n1.18\n1.17\n1.16', description: 'K8s version to install.')
string(name: 'vpc_network', defaultValue : '10.0', description: "First two octets of the VPC network, e.g. 10.0.")
string(name: 'num_subnets', defaultValue : '3', description: "Number of VPC subnets/AZs.")
string(name: 'instance_type', defaultValue : 'm5.large', description: "k8s worker node instance type.")
string(name: 'num_workers', defaultValue : '3', description: "Number of k8s worker instances.")
string(name: 'max_workers', defaultValue : '10', description: "Maximum number of k8s worker instances the cluster can scale out to.")
string(name: 'admin_users', defaultValue : '', description: "Comma delimited list of IAM users to add to the aws-auth config map.")
string(name: 'credential', defaultValue : 'jenkins', description: "Jenkins credential that provides the AWS access key and secret.")
string(name: 'key_pair', defaultValue : 'spicysomtam-aws7', description: "EC2 instance ssh keypair.")
booleanParam(name: 'cw_logs', defaultValue : true, description: "Setup Cloudwatch logging?")
booleanParam(name: 'cw_metrics', defaultValue : false, description: "Setup Cloudwatch metrics and Container Insights?")
booleanParam(name: 'metrics_server', defaultValue : true, description: "Setup k8s metrics-server?")
booleanParam(name: 'dashboard', defaultValue : false, description: "Setup k8s dashboard?")
booleanParam(name: 'prometheus', defaultValue : true, description: "Setup k8s prometheus?")
booleanParam(name: 'nginx_ingress', defaultValue : true, description: "Setup nginx ingress and load balancer?")
booleanParam(name: 'ca', defaultValue : false, description: "Setup k8s Cluster Autoscaler?")
booleanParam(name: 'cert_manager', defaultValue : false, description: "Setup cert-manager for certificate handling?")
string(name: 'region', defaultValue : 'eu-west-1', description: "AWS region.")
}
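// For reference, a build can also be triggered remotely through Jenkins'
// buildWithParameters endpoint; a rough sketch (the Jenkins URL, job name and
// credentials below are placeholders, not part of this repo):
//   curl -X POST "https://jenkins.example.com/job/eks-cluster/buildWithParameters" \
//        --user "user:api-token" \
//        --data action=create --data cluster=demo --data k8s_version=1.21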
options {
disableConcurrentBuilds()
timeout(time: 1, unit: 'HOURS')
withAWS(credentials: params.credential, region: params.region)
ansiColor('xterm')
}
agent { label 'master' }
environment {
// Prepend the workspace bin dir to PATH so the kubectl and helm downloaded in Setup take precedence
PATH = "${env.WORKSPACE}/bin:${env.PATH}"
// Workspace kube config so we don't affect other Jenkins jobs
KUBECONFIG = "${env.WORKSPACE}/.kube/config"
}
tools {
terraform '1.0'
}
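// '1.0' must match the name of a Terraform installation configured in
// Manage Jenkins -> Global Tool Configuration; the tools block puts that binary on PATH.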
stages {
stage('Setup') {
steps {
script {
currentBuild.displayName = "#" + env.BUILD_NUMBER + " " + params.action + " " + params.cluster
plan = params.cluster + '.plan'
println "Getting the kubectl and helm binaries..."
def (major, minor) = params.k8s_version.split(/\./)
sh """
[ ! -d bin ] && mkdir bin
( cd bin
# A recent kubectl is backward compatible with older cluster API versions, so a single pinned release (1.21.2) is used for all supported k8s versions
curl --silent -o kubectl https://amazon-eks.s3.us-west-2.amazonaws.com/1.21.2/2021-07-05/bin/linux/amd64/kubectl
curl -fsSL -o - https://get.helm.sh/helm-v3.6.3-linux-amd64.tar.gz | tar -xzf - linux-amd64/helm
mv linux-amd64/helm .
rm -rf linux-amd64
chmod u+x kubectl helm
ls -l kubectl helm )
"""
// This will halt the build if jq is not found (jq is needed for the Cluster Autoscaler setup below)
println "Checking jq is installed:"
sh "which jq"
}
}
}
stage('TF Plan') {
when {
expression { params.action == 'create' }
}
steps {
script {
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: params.credential,
accessKeyVariable: 'AWS_ACCESS_KEY_ID',
secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sh """
terraform init
terraform workspace new ${params.cluster} || true
terraform workspace select ${params.cluster}
terraform plan \
-var cluster-name=${params.cluster} \
-var vpc-network=${params.vpc_network} \
-var vpc-subnets=${params.num_subnets} \
-var inst-type=${params.instance_type} \
-var num-workers=${params.num_workers} \
-var max-workers=${params.max_workers} \
-var cw_logs=${params.cw_logs} \
-var inst_key_pair=${params.key_pair} \
-var ca=${params.ca} \
-var k8s_version=${params.k8s_version} \
-var aws_region=${params.region} \
-out ${plan}
"""
}
}
}
}
stage('TF Apply') {
when {
expression { params.action == 'create' }
}
steps {
script {
input "Create/update Terraform stack ${params.cluster} in aws?"
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: params.credential,
accessKeyVariable: 'AWS_ACCESS_KEY_ID',
secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sh "terraform apply -input=false -auto-approve ${plan}"
}
}
}
}
stage('Cluster setup') {
when {
expression { params.action == 'create' }
}
steps {
script {
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: params.credential,
accessKeyVariable: 'AWS_ACCESS_KEY_ID',
secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sh "aws eks update-kubeconfig --name ${params.cluster} --region ${params.region}"
// If admin_users is specified, add those IAM users to the aws-auth ConfigMap
if (params.admin_users != '') {
echo "Adding admin_users to configmap aws-auth."
sh "./generate-aws-auth-admins.sh ${params.admin_users} | kubectl apply -f -"
}
// CW Metrics and Container Insights setup
// https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Container-Insights-prerequisites.html
// https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Container-Insights-setup-EKS-quickstart.html
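// The quickstart manifest deploys the CloudWatch agent and Fluentd as DaemonSets
// in the amazon-cloudwatch namespace.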
if (params.cw_metrics == true) {
echo "Setting up Cloudwatch metrics and Container Insights."
sh """
curl --silent https://raw.githubusercontent.com/aws-samples/amazon-cloudwatch-container-insights/latest/k8s-deployment-manifest-templates/deployment-mode/daemonset/container-insights-monitoring/quickstart/cwagent-fluentd-quickstart.yaml | \\
sed "s/{{cluster_name}}/${params.cluster}/;s/{{region_name}}/${params.region}/" | \\
kubectl apply -f -
"""
}
// https://docs.aws.amazon.com/eks/latest/userguide/metrics-server.html
// metrics-server is needed for the horizontal and vertical pod autoscalers, Prometheus and the k8s dashboard
if (params.metrics_server == true) {
echo "Setting up k8s metrics-server."
sh "kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml"
}
// https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html
if (params.dashboard == true) {
echo "Setting up k8s dashboard."
sh """
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.5/aio/deploy/recommended.yaml
kubectl apply -f eks-admin-service-account.yaml
"""
echo "You need to get the secret token and then use kubectl proxy to get to the dashboard:"
echo "kubectl -n kube-system describe secret \$(kubectl -n kube-system get secret | grep eks-admin | awk '{print \$1}')"
echo "kubectl proxy"
echo "Then visit: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/#!/login"
echo "See docs at https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html"
}
// https://docs.aws.amazon.com/eks/latest/userguide/prometheus.html
if (params.prometheus == true) {
echo "Setting up k8s prometheus."
sh """
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
helm install prometheus prometheus-community/prometheus \
--namespace prometheus \
--create-namespace \
--set alertmanager.persistentVolume.storageClass="gp2",server.persistentVolume.storageClass="gp2"
"""
echo "To connect to prometheus, follow the instructions above, then connect to http://localhost:9090"
echo "See docs at https://docs.aws.amazon.com/eks/latest/userguide/prometheus.html"
echo "Alternativly use k8s Lens which is much easier (choose Helm for the Prometheus setup its not auto detected)."
}
if (params.ca == true) {
echo "Setting up k8s Cluster Autoscaler."
// Keep the Google container registry region logic simple: us or eu
gregion='us'
if (params.region =~ '^eu') {
gregion='eu'
}
// CA image tag suffix: appended to the k8s version to form the full image tag used below.
// See https://github.com/kubernetes/autoscaler/releases for the latest versions.
switch (params.k8s_version) {
case '1.21':
tag='0'
break;
case '1.20':
tag='0'
break;
case '1.19':
tag='1'
break;
case '1.18':
tag='3'
break;
case '1.17':
tag='4'
break;
case '1.16':
tag='7'
break;
}
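// e.g. k8s_version 1.21 with tag '0' selects image cluster-autoscaler:v1.21.0 in the
// 'set image' command below.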
// Setup documented here: https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html
// The CA setup was last tested late 2021 on k8s 1.21.
sh """
kubectl apply -f https://raw.githubusercontent.com/kubernetes/autoscaler/master/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-autodiscover.yaml
kubectl -n kube-system annotate deployment.apps/cluster-autoscaler cluster-autoscaler.kubernetes.io/safe-to-evict="false"
sleep 5
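# Pull the live CA deployment, substitute the real cluster name for the placeholder,
# append the recommended extra flags, then re-apply the amended manifest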
kubectl -n kube-system get deployment.apps/cluster-autoscaler -o json | \\
jq '.' | \\
sed 's/<YOUR CLUSTER NAME>/${params.cluster}/g' | \\
jq '.spec.template.spec.containers[0].command += ["--balance-similar-node-groups","--skip-nodes-with-system-pods=false"]' | \\
kubectl apply -f -
kubectl -n kube-system set image deployment.apps/cluster-autoscaler cluster-autoscaler=${gregion}.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler:v${params.k8s_version}.${tag}
"""
}
// See: https://aws.amazon.com/premiumsupport/knowledge-center/eks-access-kubernetes-services/
// Also https://docs.nginx.com/nginx-ingress-controller/installation/installation-with-helm/
// Switched to helm install late 2021 to simplify install across different k8s versions.
if (params.nginx_ingress == true) {
echo "Setting up nginx ingress and load balancer."
sh """
helm repo add nginx-stable https://helm.nginx.com/stable
helm repo update
helm install nginx-ingress nginx-stable/nginx-ingress --namespace nginx-ingress --create-namespace
kubectl apply -f nginx-ingress-proxy.yaml
echo "Dns name of nginx ingress load balancer is below:"
kubectl get svc --namespace=nginx-ingress
"""
}
// Updated cert-manager version installed late 2021
if (params.cert_manager == true) {
echo "Setting up cert-manager."
sh """
helm repo add jetstack https://charts.jetstack.io || true
helm repo update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --version v1.5.3 --set installCRDs=true --create-namespace
sleep 30 # give the cert-manager webhook time to become ready before the ClusterIssuers are applied
kubectl apply -f cluster-issuer-le-staging.yaml
kubectl apply -f cluster-issuer-le-prod.yaml
"""
}
}
}
}
}
stage('TF Destroy') {
when {
expression { params.action == 'destroy' }
}
steps {
script {
input "Destroy Terraform stack ${params.cluster} in aws?"
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding',
credentialsId: params.credential,
accessKeyVariable: 'AWS_ACCESS_KEY_ID',
secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sh """
aws eks update-kubeconfig --name ${params.cluster} --region ${params.region}
# Some of these helm releases may not be installed; try to remove them anyway
helm uninstall prometheus --namespace prometheus || true
helm uninstall cert-manager --namespace cert-manager || true
kubectl delete -f nginx-ingress-proxy.yaml || true
helm uninstall nginx-ingress --namespace nginx-ingress || true
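# The ingress load balancer is created by k8s outside of Terraform, so it must be
# removed (and given time to delete) before terraform destroy, or the VPC teardown fails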
sleep 20
terraform workspace select ${params.cluster}
terraform destroy -auto-approve
"""
}
}
}
}
}
}