provisioner_user_data.tpl
#cloud-config
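# Cloud-init template for the provisioner node: mounts the NVMe volume for
# Docker storage, installs tooling, writes the dynamic-inventory and playbook
# configuration, and kicks off the OpenShift (Origin 3.9) install.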
mounts:
  - [ nvme1n1, /var/lib/docker, auto, "defaults,noexec", "0", "0" ]
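# /dev/nvme1n1 is formatted with mkfs.xfs in runcmd below, after which
# mount -a applies this fstab entry.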
package_update: true
package_reboot_if_required: false
packages:
  - wget
  - net-tools
  - bind-utils
  - iptables-services
  - bridge-utils
  - bash-completion
  - kexec-tools
  - sos
  - psacct
  - git
  - pyOpenSSL
  - ansible
  - python-docker-py
  - skopeo
write_files:
  - path: /var/provisioner/ec2.ini
    content: |
      [ec2]
      regions = us-east-1
      regions_exclude = us-gov-west-1, cn-north-1
      destination_variable = private_dns_name
      vpc_destination_variable = private_ip_address
      hostname_variable = private_dns_name
      # destination_variable = Name
      # vpc_destination_variable = Name
      # hostname_variable = Name
      # pattern_include = openshift*
      instance_filters = tag:Environment=${environment}
      route53 = False
      all_instances = False
      all_rds_instances = False
      # Include RDS cluster information (Aurora etc.)
      include_rds_clusters = False
      all_elasticache_replication_groups = False
      all_elasticache_clusters = False
      all_elasticache_nodes = False
      cache_path = ~/.ansible/tmp
      cache_max_age = 300
      # Organize groups into a nested hierarchy instead of a flat namespace.
      nested_groups = False
      # Replace dashes in tags when creating groups, to avoid issues with Ansible.
      replace_dash_in_groups = True
      # If set to true, any tag of the form "a,b,c" is expanded into a list
      # and the results are used to create additional tag_* inventory groups.
      expand_csv_tags = False
      # The EC2 inventory output can become very large. To manage its size,
      # configure which groups should be created.
      group_by_instance_id = True
      group_by_region = True
      group_by_availability_zone = True
      group_by_aws_account = False
      group_by_ami_id = True
      group_by_instance_type = True
      group_by_instance_state = False
      group_by_platform = True
      group_by_key_pair = True
      group_by_vpc_id = True
      group_by_security_group = True
      group_by_tag_keys = True
      group_by_tag_none = True
      group_by_route53_names = True
      group_by_rds_engine = True
      group_by_rds_parameter_group = True
      group_by_elasticache_engine = True
      group_by_elasticache_cluster = True
      group_by_elasticache_parameter_group = True
      group_by_elasticache_replication_group = True
      stack_filters = False
      [credentials]
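      # No static keys here; the instance is assumed to authenticate to AWS
      # through its EC2 instance profile, so [credentials] is left empty.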
  - path: /root/.ssh/config
    content: |
      Host *
        User centos
        StrictHostKeyChecking no
        ProxyCommand none
        CheckHostIP no
        ForwardAgent yes
        IdentityFile /root/.ssh/id_rsa
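      # Host-key checking is relaxed deliberately: autoscaled cluster nodes
      # are replaced over time, so their keys cannot be pinned in advance.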
  - path: /etc/amazon/ssm/seelog.xml
    content: |
      <seelog type="adaptive" mininterval="2000000" maxinterval="100000000" critmsgcount="500" minlevel="info">
        <exceptions>
          <exception filepattern="test*" minlevel="error"/>
        </exceptions>
        <outputs formatid="fmtinfo">
          <console formatid="fmtinfo"/>
          <rollingfile type="size" filename="/var/log/amazon/ssm/amazon-ssm-agent.log" maxsize="30000000" maxrolls="5"/>
          <filter levels="error,critical" formatid="fmterror">
            <rollingfile type="size" filename="/var/log/amazon/ssm/errors.log" maxsize="10000000" maxrolls="5"/>
          </filter>
          <custom name="cloudwatch_receiver" formatid="fmtdebug" data-log-group="${log_group}"/>
        </outputs>
        <formats>
          <format id="fmterror" format="%Date %Time %LEVEL [%FuncShort @ %File.%Line] %Msg%n"/>
          <format id="fmtdebug" format="%Date %Time %LEVEL [%FuncShort @ %File.%Line] %Msg%n"/>
          <format id="fmtinfo" format="%Date %Time %LEVEL %Msg%n"/>
        </formats>
      </seelog>
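  # The cloudwatch_receiver output above ships SSM agent logs to the
  # CloudWatch log group passed in by the template.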
  - path: /bin/provisioner.sh
    content: |
      #!/bin/bash
      # ec2.py reads this path from the environment, so it must be exported.
      export EC2_INI_PATH=/var/provisioner/ec2.ini
      # A message on the SQS queue signals that (re)provisioning is required.
      if aws sqs receive-message --queue-url ${sqs} --visibility-timeout 0 --region ${region} | grep -i body
      then
        echo New provision required
        aws sqs purge-queue --queue-url ${sqs} --region ${region}
        sleep 140
        # Wait for any in-flight playbook run to finish before starting another.
        while pgrep ansible-playbook > /dev/null
        do
          sleep 2
          echo waiting for process to finish
        done
        # This is only suitable for 3.7 and prior:
        #while ansible-playbook -i /var/provisioner /openshift-ansible/playbooks/byo/config.yml|tee -a /var/provisioner/provisioner.log|grep -i "Failure summary"
        #do
        #  echo Provision attempt
        #done
        # 3.9 magic - note that this may need to be run several times.
        ansible-playbook -i /var/provisioner /openshift-ansible/playbooks/prerequisites.yml >> /var/provisioner/provisioner.log && ansible-playbook -i /var/provisioner /openshift-ansible/playbooks/deploy_cluster.yml >> /var/provisioner/provisioner.log
      fi
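  # provisioner.sh runs once from runcmd below; the SSM document sent in
  # runcmd is assumed to re-invoke it whenever the queue signals another pass.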
  - path: /var/provisioner/ansiblehosts
    content: |
      # Create an OSEv3 group that contains the masters and nodes groups
      [OSEv3:children]
      masters
      nodes
      etcd
      # Set variables common to all OSEv3 hosts
      [OSEv3:vars]
      openshift_disable_check=memory_availability, disk_availability, docker_storage
      # SSH user; this user should allow SSH-based auth without requiring a password
      ansible_user=centos
      openshift_clock_enabled=true
      openshift_docker_insecure_registries=['172.30.0.0/16']
      openshift_master_default_subdomain=public.${master_public_fqdn}
      openshift_public_hostname=${master_public_fqdn}
      openshift_master_cluster_method=native
      openshift_master_cluster_hostname=${master_private_fqdn}
      openshift_master_cluster_public_hostname=${master_public_fqdn}
      openshift_schedulable=true
      openshift_web_console_nodeselector={"region": "infra"}
      # Schedule only on nodes with the label below
      #osm_default_node_selector='app=true'
      # If ansible_user is not root, ansible_become must be set to true
      ansible_become=true
      openshift_deployment_type=origin
      [tag_aws_autoscaling_groupName_${master_asg_name}]
      [tag_aws_autoscaling_groupName_${infra_asg_name}]
      [tag_aws_autoscaling_groupName_${app_asg_name}]
      # host group for masters
      [masters:children]
      tag_aws_autoscaling_groupName_${master_asg_name}
      [masters:vars]
      # openshift_schedulable=true
      # host group for etcd
      [etcd:children]
      tag_aws_autoscaling_groupName_${master_asg_name}
      # host group for nodes, includes region info
      [nodes:children]
      tag_aws_autoscaling_groupName_${master_asg_name}
      infra
      app
      [infra:children]
      tag_aws_autoscaling_groupName_${infra_asg_name}
      [infra:vars]
      openshift_node_labels="{'region': 'infra'}"
      [app:children]
      tag_aws_autoscaling_groupName_${app_asg_name}
      [app:vars]
      openshift_node_labels="{'app': 'true'}"
bootcmd:
  - mkdir /var/provisioner
runcmd:
  - mkfs.xfs /dev/nvme1n1
  - mount -a
  - git clone -b release-3.9 https://github.com/openshift/openshift-ansible
  - wget -O /var/provisioner/ec2.py https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/ec2.py
  - wget -O /bin/jq https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 && chmod +x /bin/jq
  - yum install -y https://s3-${region}.amazonaws.com/amazon-ssm-${region}/latest/linux_amd64/amazon-ssm-agent.rpm && systemctl start amazon-ssm-agent && systemctl enable amazon-ssm-agent
  - chmod +x /var/provisioner/ec2.py /bin/provisioner.sh
  - curl -O https://bootstrap.pypa.io/get-pip.py && python get-pip.py && pip install awscli boto boto3 && pip2 install awscli boto boto3
  - ssh-keygen -t rsa -N "" -f /root/.ssh/id_rsa && rm -f /root/.ssh/id_rsa.pub
  - aws ssm get-parameters --names "${environment}.provisioner_id_rsa" --with-decryption --region ${region} --output json|jq -r '.|{Parameters}[][]|.Value'>/root/.ssh/id_rsa
  - aws ssm send-command --document-name ${ssm} --targets Key=tag:Name,Values=provisioner --region ${region}
  - echo Make sure we have Ansible 2.4.3
  - rpm -Uvh https://releases.ansible.com/ansible/rpm/release/epel-7-x86_64/ansible-2.4.3.0-1.el7.ans.noarch.rpm
  - /bin/provisioner.sh
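# Per the note in provisioner.sh, the deploy may need several passes; a new
# message on the SQS queue is assumed to retrigger provisioning on this node.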