/**
 *
 * Launch heat stack with CI/CD lab infrastructure
 *
 * Expected parameters:
 *   HEAT_TEMPLATE_URL             URL to git repo with Heat templates
 *   HEAT_TEMPLATE_CREDENTIALS     Credentials to the Heat templates repo
 *   HEAT_TEMPLATE_BRANCH          Heat templates repo branch
 *   HEAT_STACK_NAME               Heat stack name
 *   HEAT_STACK_TEMPLATE           Heat stack HOT template
 *   HEAT_STACK_ENVIRONMENT        Heat stack environment parameters
 *   HEAT_STACK_ZONE               Heat stack availability zone
 *   HEAT_STACK_PUBLIC_NET         Heat stack floating IP pool
 *   HEAT_STACK_DELETE             Delete Heat stack when finished (bool)
 *   HEAT_STACK_CLEANUP_JOB        Name of job for deleting Heat stack
 *   HEAT_STACK_REUSE              Reuse existing Heat stack (don't create a new one)
 *
 *   SALT_MASTER_CREDENTIALS       Credentials to the Salt API
 *   SALT_MASTER_PORT              Port of salt-api, defaults to 6969
 *
 *   OPENSTACK_API_URL             OpenStack API address
 *   OPENSTACK_API_CREDENTIALS     Credentials to the OpenStack API
 *   OPENSTACK_API_PROJECT         OpenStack project to connect to
 *   OPENSTACK_API_PROJECT_DOMAIN  Domain of the OpenStack project
 *   OPENSTACK_API_PROJECT_ID      ID of the OpenStack project
 *   OPENSTACK_API_USER_DOMAIN     Domain of the OpenStack user
 *   OPENSTACK_API_CLIENT          Version of the OpenStack python clients
 *   OPENSTACK_API_VERSION         Version of the OpenStack API (2/3)
 *
 *   SSH_PUBLIC_KEY                Optional public SSH key to deploy to all nodes
 *
 */
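
// For illustration only: a hypothetical parameter set for a run of this job
// (parameter names are real, the values below are made up):
//
//   HEAT_TEMPLATE_URL     = https://example.org/heat-templates.git
//   HEAT_TEMPLATE_BRANCH  = master
//   HEAT_STACK_TEMPLATE   = cicd_lab.hot
//   HEAT_STACK_ZONE       = nova
//   HEAT_STACK_PUBLIC_NET = public
//   HEAT_STACK_REUSE      = false
//   HEAT_STACK_DELETE     = true
//   OPENSTACK_API_VERSION = 3
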
common = new com.mirantis.mk.Common()
git = new com.mirantis.mk.Git()
openstack = new com.mirantis.mk.Openstack()
salt = new com.mirantis.mk.Salt()
orchestrate = new com.mirantis.mk.Orchestrate()
def python = new com.mirantis.mk.Python()
def pepperEnv = "pepperEnv"
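
// Per-user cap on concurrently running stacks of this job type;
// enforced in the 'Connect to OpenStack cloud' stage below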
_MAX_PERMITTED_STACKS = 2
timeout(time: 12, unit: 'HOURS') {
    node {
        try {
            // connection objects
            def openstackCloud

            // value defaults
            def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
            def openstackEnv = "${env.WORKSPACE}/venv"

            // SSH_PUBLIC_KEY is an optional job parameter; fall back cleanly
            // when it is not defined
            try {
                sshPubKey = SSH_PUBLIC_KEY
            } catch (MissingPropertyException e) {
                sshPubKey = false
            }

            if (HEAT_STACK_REUSE.toBoolean() == true && HEAT_STACK_NAME == '') {
                error("If you want to reuse an existing stack you need to provide its name")
            }

            if (HEAT_STACK_REUSE.toBoolean() == false) {
                // Don't allow setting a custom heat stack name
                wrap([$class: 'BuildUser']) {
                    if (env.BUILD_USER_ID) {
                        HEAT_STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
                    } else {
                        HEAT_STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
                    }
                    currentBuild.description = HEAT_STACK_NAME
                }
            }
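
            // env.BUILD_USER_ID above comes from the Jenkins Build User Vars
            // plugin (the 'BuildUser' wrapper); it is unset when the build is
            // not triggered by a user, e.g. by a timer or an upstream job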

            //
            // Bootstrap
            //

            stage('Download Heat templates') {
                git.checkoutGitRepository('template', HEAT_TEMPLATE_URL, HEAT_TEMPLATE_BRANCH, HEAT_TEMPLATE_CREDENTIALS)
            }

            stage('Install OpenStack CLI') {
                openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
            }
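
            // The clients are installed into a dedicated virtualenv
            // (${env.WORKSPACE}/venv) matching the release selected by
            // OPENSTACK_API_CLIENT, so they don't clash with system packages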

            stage('Connect to OpenStack cloud') {
                openstackCloud = openstack.createOpenstackEnv(openstackEnv,
                    OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
                    OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
                    OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
                    OPENSTACK_API_VERSION)
                openstack.getKeystoneToken(openstackCloud, openstackEnv)

                // Enforce the per-user stack quota: a human user (anything but
                // the internal "jenkins" account) may only run
                // _MAX_PERMITTED_STACKS stacks of this job type at a time
                wrap([$class: 'BuildUser']) {
                    if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !HEAT_STACK_REUSE.toBoolean()) {
                        def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
                        if (existingStacks.size() >= _MAX_PERMITTED_STACKS) {
                            HEAT_STACK_DELETE = "false"
                            throw new Exception("You cannot create a new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}).\nStack names: ${existingStacks}")
                        }
                    }
                }
            }

            if (HEAT_STACK_REUSE.toBoolean() == false) {
                stage('Launch new Heat stack') {
                    envParams = [
                        'cluster_zone': HEAT_STACK_ZONE,
                        'cluster_public_net': HEAT_STACK_PUBLIC_NET
                    ]
                    openstack.createHeatStack(openstackCloud, HEAT_STACK_NAME, HEAT_STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
                }
            }
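
            // envParams are passed to Heat as template parameters on top of
            // the HEAT_STACK_ENVIRONMENT file, selecting the availability
            // zone and the floating IP pool for the lab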

            stage('Connect to Salt master') {
                def saltMasterPort
                try {
                    saltMasterPort = SALT_MASTER_PORT
                } catch (MissingPropertyException e) {
                    saltMasterPort = 6969
                }
                saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
                currentBuild.description = "${HEAT_STACK_NAME}: ${saltMasterHost}"
                saltMasterUrl = "http://${saltMasterHost}:${saltMasterPort}"
                python.setupPepperVirtualenv(pepperEnv, saltMasterUrl, SALT_MASTER_CREDENTIALS)
            }
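
            // Pepper is a thin client for the salt-api REST interface; the
            // salt.* calls below are executed through this virtualenv against
            // the salt-api endpoint of the freshly booted Salt master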

            //
            // Install
            //

            stage('Install core infra') {
                // salt.master, reclass
                // refresh_pillar
                // sync_all
                // linux, openssh, salt.minion, ntp
                orchestrate.installFoundationInfra(pepperEnv)
                orchestrate.validateFoundationInfra(pepperEnv)
            }
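
            // Targeting note: 'I@glusterfs:server' is a Salt compound match
            // on pillar data (all minions that have the glusterfs:server
            // pillar key), while 'ci01*' globs on the minion ID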
stage("Deploy GlusterFS") {
salt.enforceState(pepperEnv, 'I@glusterfs:server', 'glusterfs.server.service', true)
retry(2) {
salt.enforceState(pepperEnv, 'ci01*', 'glusterfs.server.setup', true)
}
sleep(5)
salt.enforceState(pepperEnv, 'I@glusterfs:client', 'glusterfs.client', true)
timeout(5) {
println "Waiting for GlusterFS volumes to get mounted.."
salt.cmdRun(pepperEnv, 'I@glusterfs:client', 'while true; do systemctl -a|grep "GlusterFS File System"|grep -v mounted >/dev/null || break; done')
}
print common.prettyPrint(salt.cmdRun(pepperEnv, 'I@glusterfs:client', 'mount|grep fuse.glusterfs || echo "Command failed"'))
}
stage("Deploy GlusterFS") {
salt.enforceState(pepperEnv, 'I@haproxy:proxy', 'haproxy,keepalived')
}
stage("Setup Docker Swarm") {
salt.enforceState(pepperEnv, 'I@docker:host', 'docker.host', true)
salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'docker.swarm', true)
salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'salt', true)
salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master', 'mine.flush')
salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master', 'mine.update')
salt.enforceState(pepperEnv, 'I@docker:swarm', 'docker.swarm', true)
print common.prettyPrint(salt.cmdRun(pepperEnv, 'I@docker:swarm:role:master', 'docker node ls'))
}
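
            // Once the swarm has formed, 'docker node ls' above should list
            // every I@docker:swarm minion with STATUS Ready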
stage("Configure OSS services") {
salt.enforceState(pepperEnv, 'I@devops_portal:config', 'devops_portal.config')
salt.enforceState(pepperEnv, 'I@rundeck:server', 'rundeck.server')
}
stage("Deploy Docker services") {
// We need /etc/aptly-publisher.yaml to be present before
// services are deployed
// XXX: for some weird unknown reason, refresh_pillar is
// required to execute here
salt.runSaltProcessStep(pepperEnv, 'I@aptly:publisher', 'saltutil.refresh_pillar', [], null, true)
salt.enforceState(pepperEnv, 'I@aptly:publisher', 'aptly.publisher', true)
retry(3) {
sleep(5)
salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'docker.client')
}
// XXX: Workaround to have `/var/lib/jenkins` on all
// nodes where are jenkins_slave services are created.
salt.cmdRun(pepperEnv, 'I@docker:swarm', "mkdir -p /var/lib/jenkins")
}
stage("Configure CI/CD services") {
salt.syncAll(pepperEnv, '*')
// Aptly
timeout(10) {
println "Waiting for Aptly to come up.."
retry(2) {
// XXX: retry to workaround magical VALUE_TRIMMED
// response from salt master + to give slow cloud some
// more time to settle down
salt.cmdRun(pepperEnv, 'I@aptly:server', 'while true; do curl -sf http://apt.mcp.mirantis.net:8081/api/version >/dev/null && break; done')
}
}
salt.enforceState(pepperEnv, 'I@aptly:server', 'aptly', true)
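
                // Each service below follows the same pattern: poll its
                // endpoint (mostly on the 172.16.10.254 VIP) until it answers,
                // then enforce the matching Salt state on its client nodes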

                // OpenLDAP
                timeout(10) {
                    println "Waiting for OpenLDAP to come up.."
                    salt.cmdRun(pepperEnv, 'I@openldap:client', 'while true; do curl -sf ldap://172.16.10.254 >/dev/null && break; done')
                }
                salt.enforceState(pepperEnv, 'I@openldap:client', 'openldap', true)

                // Gerrit
                timeout(10) {
                    println "Waiting for Gerrit to come up.."
                    salt.cmdRun(pepperEnv, 'I@gerrit:client', 'while true; do curl -sf 172.16.10.254:8080 >/dev/null && break; done')
                }
                salt.enforceState(pepperEnv, 'I@gerrit:client', 'gerrit', true)

                // Jenkins
                timeout(10) {
                    println "Waiting for Jenkins to come up.."
                    salt.cmdRun(pepperEnv, 'I@jenkins:client', 'while true; do curl -sf 172.16.10.254:8081 >/dev/null && break; done')
                }
                retry(2) {
                    // XXX: needs a retry as the first run installs python-jenkins,
                    // which makes the Jenkins Salt modules available to the second run
                    salt.enforceState(pepperEnv, 'I@jenkins:client', 'jenkins', true)
                }

                // Postgres client - initialize OSS services databases
                timeout(300) {
                    println "Waiting for postgresql database to come up.."
                    salt.cmdRun(pepperEnv, 'I@postgresql:client', 'while true; do if docker service logs postgresql_postgresql-db | grep "ready to accept"; then break; else sleep 5; fi; done')
                }
                // XXX: the first run usually fails on some inserts, but we need it
                // to create the databases; don't fail the build on its errors
                salt.enforceState(pepperEnv, 'I@postgresql:client', 'postgresql.client', true, false)

                // Set up the postgres database integration between the Pushkin
                // notification service and the Security Monkey security audit service
                timeout(10) {
                    println "Waiting for Pushkin to come up.."
                    salt.cmdRun(pepperEnv, 'I@postgresql:client', 'while true; do curl -sf 172.16.10.254:8887/apps >/dev/null && break; done')
                }
                salt.enforceState(pepperEnv, 'I@postgresql:client', 'postgresql.client', true)

                // Rundeck
                timeout(10) {
                    println "Waiting for Rundeck to come up.."
                    salt.cmdRun(pepperEnv, 'I@rundeck:client', 'while true; do curl -sf 172.16.10.254:4440 >/dev/null && break; done')
                }
                salt.enforceState(pepperEnv, 'I@rundeck:client', 'rundeck.client', true)

                // Elasticsearch
                timeout(10) {
                    println 'Waiting for Elasticsearch to come up..'
                    salt.cmdRun(pepperEnv, 'I@elasticsearch:client', 'while true; do curl -sf 172.16.10.254:9200 >/dev/null && break; done')
                }
                retry(3) {
                    sleep(10)
                    // XXX: the first run sometimes fails on updating indexes,
                    // so we need to wait and retry
                    salt.enforceState(pepperEnv, 'I@elasticsearch:client', 'elasticsearch.client', true)
                }
            }
stage("Finalize") {
//
// Deploy user's ssh key
//
def adminUser
def authorizedKeysFile
def adminUserCmdOut = salt.cmdRun(pepperEnv, 'I@salt:master', "[ ! -d /home/ubuntu ] || echo 'ubuntu user exists'")
if (adminUserCmdOut =~ /ubuntu user exists/) {
adminUser = "ubuntu"
authorizedKeysFile = "/home/ubuntu/.ssh/authorized_keys"
} else {
adminUser = "root"
authorizedKeysFile = "/root/.ssh/authorized_keys"
}
if (sshPubKey) {
println "Deploying provided ssh key at ${authorizedKeysFile}"
salt.cmdRun(pepperEnv, '*', "echo '${sshPubKey}' | tee -a ${authorizedKeysFile}")
}
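
                // NOTE: 'tee -a' appends, so re-running this stage can leave
                // duplicate key entries in authorized_keys; harmless for a
                // short-lived lab stack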

                //
                // Generate docs
                //
                try {
                    try {
                        // Run the sphinx state to install sphinx-build, which is
                        // needed by the upcoming orchestration run
                        salt.enforceState(pepperEnv, 'I@sphinx:server', 'sphinx')
                    } catch (Throwable e) {
                        true
                    }
                    retry(3) {
                        // TODO: fix salt.orchestrateSystem
                        // print salt.orchestrateSystem(pepperEnv, ['expression': '*', 'type': 'compound'], 'sphinx.orch.generate_doc')
                        def out = salt.cmdRun(pepperEnv, 'I@salt:master', 'salt-run state.orchestrate sphinx.orch.generate_doc || echo "Command execution failed"')
                        print common.prettyPrint(out)
                        if (out =~ /Command execution failed/) {
                            throw new Exception("Command execution failed")
                        }
                    }
                } catch (Throwable e) {
                    // We don't want sphinx docs to ruin the whole build, so
                    // possible errors are just ignored here
                    true
                }
                salt.enforceState(pepperEnv, 'I@nginx:server', 'nginx')

                def failedSvc = salt.cmdRun(pepperEnv, '*', """systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true""")
                if (failedSvc =~ /Command execution failed/) {
                    common.errorMsg("Some services are not running. Environment may not be fully functional!")
                }
common.successMsg("""
============================================================
Your CI/CD lab has been deployed and you can enjoy it:
Use sshuttle to connect to your private subnet:
sshuttle -r ${adminUser}@${saltMasterHost} 172.16.10.0/24
And visit services running at 172.16.10.254 (vip address):
9600 HAProxy statistics
8080 Gerrit
8081 Jenkins
8089 LDAP administration
4440 Rundeck
8084 DevOps Portal
8091 Docker swarm visualizer
8090 Reclass-generated documentation
If you provided SSH_PUBLIC_KEY, you can use it to login,
otherwise you need to get private key connected to this
heat template.
DON'T FORGET TO TERMINATE YOUR STACK WHEN YOU DON'T NEED IT!
============================================================""")
}
        } catch (Throwable e) {
            // If there was an error or exception thrown, the build failed
            currentBuild.result = "FAILURE"
            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
            throw e
        } finally {
            // Cleanup: hand the stack over to the dedicated cleanup job
            if (HEAT_STACK_DELETE.toBoolean() == true) {
                stage('Trigger cleanup job') {
                    build(job: 'deploy-stack-cleanup', parameters: [
                        [$class: 'StringParameterValue', name: 'STACK_NAME', value: HEAT_STACK_NAME],
                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: OPENSTACK_API_PROJECT],
                    ])
                }
            }
        }
    }
}