# To use the Docker Hub docker image
#image: docker:latest
image: $CI_REGISTRY/mouse-informatics/docker:latest

variables:
  # When using the dind service we need to instruct Docker to talk to the
  # daemon started inside of the service. The daemon is available over a
  # network connection instead of the default /var/run/docker.sock socket.
  #
  # The 'docker' hostname is the alias of the service container as described at
  # https://docs.gitlab.com/ee/ci/docker/using_docker_images.html#accessing-the-services
  #
  # Note that if you're using the Kubernetes executor, the variable should be set to
  # tcp://localhost:2375/ because of how the Kubernetes executor connects services
  # to the job container
  # DOCKER_HOST: tcp://localhost:2375/
  #
  # For non-Kubernetes executors, we use tcp://docker:2375/
  DOCKER_HOST: tcp://docker:2375/

  # When using dind, it's wise to use the overlayfs driver for
  # improved performance.
  DOCKER_DRIVER: overlay2

  # Since the docker:dind container and the runner container don't share their root
  # filesystem, the job's working directory can be used as a mount point for child
  # containers. For example, if you have files you want to share with a child container,
  # you may create a subdirectory under /builds/$CI_PROJECT_PATH and use it as your
  # mount point.
  MOUNT_POINT: /builds/$CI_PROJECT_PATH/mnt

  # For EBI you need to override the definition of CI_REGISTRY to remove the port number
  CI_REGISTRY: dockerhub.ebi.ac.uk
  CI_REGISTRY_IMAGE: $CI_REGISTRY/$CI_PROJECT_PATH

  #NOW: $(date '+%Y-%m-%d-%H-%M-%S')
  #NOW: $(date '+%Y-%m-%d')
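  # Note: GitLab CI does not perform shell command substitution in the variables
  # section, which is presumably why the NOW definitions above are commented out.
  # A runtime equivalent would be to set it inside a job's script, e.g.:
  #   - NOW=$(date '+%Y-%m-%d-%H-%M-%S')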
  # To solve the issue with the Docker in Docker 19.03 service.
  # Logged as: GitLab.com CI jobs failing if using docker:stable-dind image
  # see: https://gitlab.com/gitlab-com/gl-infra/production/issues/982
  DOCKER_TLS_CERTDIR: ""
stages:
  - build-dev
  - deploy-dev
  - build-prod
  - deploy-prod
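
# Branch mapping: the 'dev' branch runs build-dev and deploy-dev; 'main' runs
# build-prod and deploy-prod. The build jobs are excluded from scheduled
# pipelines, presumably so that scheduled runs redeploy without rebuilding.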
build_dev_image:
  stage: build-dev
  services:
    - name: $CI_REGISTRY/mouse-informatics/dind:latest
      alias: docker
  except:
    - schedules
  before_script:
    # Point the Dockerfile at internal mirrors of the public node and nginx base images
    - sed -i "s|FROM node|FROM ${LOCAL_GITLAB_NODE_IMAGE}|g" Dockerfile
    - sed -i "s|FROM nginx|FROM ${LOCAL_GITLAB_NGINX_IMAGE}|g" Dockerfile
    - mkdir -p "$MOUNT_POINT"
    - echo "${CI_REGISTRY_PASSWORD}" | docker login -u "${CI_REGISTRY_USER}" --password-stdin ${CI_REGISTRY}
    # The dev image ships the development configuration in place of the production one
    - rm .env.production && cp .env.development .env.production
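  # REQUIRES_REBUILD_IMAGE is presumably a project-level CI/CD variable used as a
  # switch: when it is not "true" the job still runs but skips the build and push.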
  script:
    - |
      if [[ "${REQUIRES_REBUILD_IMAGE}" == "true" ]]; then
        echo "Building dev image: ${CI_REGISTRY_IMAGE}:dev"
        docker build -t "${CI_REGISTRY_IMAGE}":dev -f Dockerfile . | tee ${MOUNT_POINT}/build.log
        docker push "${CI_REGISTRY_IMAGE}":dev | tee ${MOUNT_POINT}/push.log
        docker logout ${CI_REGISTRY}
        echo "Pushing to docker hub"
        echo "${DOCKER_HUB_PWD}" | docker login --username "${DOCKER_HUB_USER}" --password-stdin
        docker tag "${CI_REGISTRY_IMAGE}":dev "${DOCKER_HUB_USER}"/"${DOCKER_HUB_REPO}":dev
        docker push "${DOCKER_HUB_USER}"/"${DOCKER_HUB_REPO}":dev | tee ${MOUNT_POINT}/dockerhub-push-latest.log
      fi
    - docker logout
  artifacts:
    paths:
      - "$MOUNT_POINT/"
  only:
    refs:
      - dev
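
# build_prod_image mirrors the dev build but runs from 'main' and keeps the
# committed .env.production instead of swapping in the development settings.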
build_prod_image:
  stage: build-prod
  services:
    - name: $CI_REGISTRY/mouse-informatics/dind:latest
      alias: docker
  except:
    - schedules
  before_script:
    - mkdir -p "$MOUNT_POINT"
    - echo "${CI_REGISTRY_PASSWORD}" | docker login -u "${CI_REGISTRY_USER}" --password-stdin ${CI_REGISTRY}
  script:
    - sed -i "s|FROM node|FROM ${LOCAL_GITLAB_NODE_IMAGE}|g" Dockerfile
    - sed -i "s|FROM nginx|FROM ${LOCAL_GITLAB_NGINX_IMAGE}|g" Dockerfile
    - |
      if [[ "${REQUIRES_REBUILD_IMAGE}" == "true" ]]; then
        echo "Building prod image"
        docker build -t "${CI_REGISTRY_IMAGE}":prod -f Dockerfile . | tee ${MOUNT_POINT}/build.log
        docker push "${CI_REGISTRY_IMAGE}":prod | tee ${MOUNT_POINT}/push.log
        docker logout ${CI_REGISTRY}
        echo "Pushing to docker hub"
        echo "${DOCKER_HUB_PWD}" | docker login --username "${DOCKER_HUB_USER}" --password-stdin
        docker tag "${CI_REGISTRY_IMAGE}":prod "${DOCKER_HUB_USER}"/"${DOCKER_HUB_REPO}":prod
        docker push "${DOCKER_HUB_USER}"/"${DOCKER_HUB_REPO}":prod | tee ${MOUNT_POINT}/dockerhub-push-latest.log
      fi
    - docker logout
  artifacts:
    paths:
      - "$MOUNT_POINT/"
  only:
    refs:
      - main
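
# The deploy jobs below build a kubeconfig from scratch out of CI/CD variables
# (cluster endpoint, CA data, service-account token) and then apply the
# Kubernetes manifests. HH and HX are presumably the two sites hosting the
# clusters; the same prod manifest is applied to each.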
deploy-HH-WP-WEBADMIN-dev:
  stage: deploy-dev
  image: dtzar/helm-kubectl:2.13.0
  script:
    - kubectl config set-cluster local --server="${HH_WP_WEBADMIN_ENDPOINT}"
    - kubectl config set clusters.local.certificate-authority-data "${HH_WP_WEBADMIN_CERTIFICATE_AUTHORITY}"
    - kubectl config set-credentials ${HH_WP_WEBADMIN_DEV_USER} --token="${HH_WP_WEBADMIN_DEV_USER_TOKEN}"
    - kubectl config set-context "${HH_WP_WEBADMIN_DEV_NAMESPACE}" --cluster=local --user=${HH_WP_WEBADMIN_DEV_USER} --namespace="${HH_WP_WEBADMIN_DEV_NAMESPACE}"
    - kubectl config use-context "${HH_WP_WEBADMIN_DEV_NAMESPACE}"
    - kubectl version
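    # If 'kubectl apply' reports the object unchanged (the manifest always points
    # at the same ':dev' tag), patch a timestamp annotation into the pod template
    # so the Deployment still rolls out and re-pulls the image.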
    - |
      if kubectl apply -f k8-deploy/nodeport_deploy/dev/deployment-dev.yml --record | grep -q unchanged; then
        echo "=> Patching deployment to force image update."
        kubectl patch -f k8-deploy/nodeport_deploy/dev/deployment-dev.yml --record -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"ci-last-updated\":\"$(date +'%s')\"}}}}}"
      else
        echo "=> Deployment apply has changed the object, no need to force image update."
      fi
    - kubectl rollout status -f k8-deploy/nodeport_deploy/dev/deployment-dev.yml
    - kubectl get pods,service,deploy,replicaset,ing
  only:
    refs:
      - dev
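
# Same procedure as the dev deployment, targeting the HH production
# cluster/namespace and the prod manifest.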
deploy-HH-WP-WEBADMIN-prod:
  stage: deploy-prod
  image: dtzar/helm-kubectl:2.13.0
  script:
    - kubectl config set-cluster local --server="${HH_WP_WEBADMIN_ENDPOINT}"
    - kubectl config set clusters.local.certificate-authority-data "${HH_WP_WEBADMIN_CERTIFICATE_AUTHORITY}"
    - kubectl config set-credentials ${HH_WP_WEBADMIN_PROD_USER} --token="${HH_WP_WEBADMIN_PROD_USER_TOKEN}"
    - kubectl config set-context "${HH_WP_WEBADMIN_PROD_NAMESPACE}" --cluster=local --user=${HH_WP_WEBADMIN_PROD_USER} --namespace="${HH_WP_WEBADMIN_PROD_NAMESPACE}"
    - kubectl config use-context "${HH_WP_WEBADMIN_PROD_NAMESPACE}"
    - kubectl version
    - |
      if kubectl apply -f k8-deploy/nodeport_deploy/prod/deployment-prod.yml --record | grep -q unchanged; then
        echo "=> Patching deployment to force image update."
        kubectl patch -f k8-deploy/nodeport_deploy/prod/deployment-prod.yml --record -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"ci-last-updated\":\"$(date +'%s')\"}}}}}"
      else
        echo "=> Deployment apply has changed the object, no need to force image update."
      fi
    - kubectl rollout status -f k8-deploy/nodeport_deploy/prod/deployment-prod.yml
    - kubectl get pods,service,deploy,replicaset,ing
  only:
    refs:
      - main
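
# Same procedure again for the HX site's production cluster.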
deploy-HX-WP-WEBADMIN-prod:
  stage: deploy-prod
  image: dtzar/helm-kubectl:2.13.0
  script:
    - kubectl config set-cluster local --server="${HX_WP_WEBADMIN_ENDPOINT}"
    - kubectl config set clusters.local.certificate-authority-data "${HX_WP_WEBADMIN_CERTIFICATE_AUTHORITY}"
    - kubectl config set-credentials ${HX_WP_WEBADMIN_PROD_USER} --token="${HX_WP_WEBADMIN_PROD_USER_TOKEN}"
    - kubectl config set-context "${HX_WP_WEBADMIN_PROD_NAMESPACE}" --cluster=local --user=${HX_WP_WEBADMIN_PROD_USER} --namespace="${HX_WP_WEBADMIN_PROD_NAMESPACE}"
    - kubectl config use-context "${HX_WP_WEBADMIN_PROD_NAMESPACE}"
    - kubectl version
    - |
      if kubectl apply -f k8-deploy/nodeport_deploy/prod/deployment-prod.yml --record | grep -q unchanged; then
        echo "=> Patching deployment to force image update."
        kubectl patch -f k8-deploy/nodeport_deploy/prod/deployment-prod.yml --record -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"ci-last-updated\":\"$(date +'%s')\"}}}}}"
      else
        echo "=> Deployment apply has changed the object, no need to force image update."
      fi
    - kubectl rollout status -f k8-deploy/nodeport_deploy/prod/deployment-prod.yml
    - kubectl get pods,service,deploy,replicaset,ing
  only:
    refs:
      - main