-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathupgrade-node-tasks.yaml
159 lines (129 loc) · 3.62 KB
/
upgrade-node-tasks.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
---
- name: Update and upgrade apt packages
  # Refresh the package index and apply pending upgrades before
  # touching any Kubernetes packages. "safe" is the documented
  # equivalent of the ambiguous truthy "yes".
  ansible.builtin.apt:
    upgrade: safe
    update_cache: true

- name: Unhold kubeadm
  # dpkg_selections is idempotent and reports change status correctly,
  # unlike shelling out to apt-mark (which always reported "changed").
  ansible.builtin.dpkg_selections:
    name: kubeadm
    selection: install

- name: Install new kubeadm version
  # NOTE(review): the "-00" revision suffix matches the legacy
  # apt.kubernetes.io repo; pkgs.k8s.io packages use "-1.1" —
  # confirm which repository these hosts are configured with.
  ansible.builtin.apt:
    name: "kubeadm={{ upgrade_version }}-00"
    allow_downgrade: true

- name: Hold kubeadm
  # Re-pin kubeadm so routine apt upgrades cannot move it.
  ansible.builtin.dpkg_selections:
    name: kubeadm
    selection: hold

- name: Kubeadm version
  # Read-only query; command (not shell) suffices and it never changes
  # state, so suppress the "changed" report.
  ansible.builtin.command: kubeadm version -o yaml
  register: kubeadm_version
  changed_when: false

- name: Show kubeadm version
  ansible.builtin.debug:
    msg: "{{ kubeadm_version.stdout }}"
- name: Kubeadm upgrade plan
  # Dry-run preview of the cluster upgrade; read-only, so mark it
  # unchanged. Only meaningful on the first control-plane node.
  ansible.builtin.command: kubeadm upgrade plan
  register: kubeadm_upgrade_plan
  changed_when: false
  when: ansible_hostname == groups.control[0]

- name: Show kubeadm upgrade plan
  ansible.builtin.debug:
    msg: "{{ kubeadm_upgrade_plan.stdout }}"
  when: ansible_hostname == groups.control[0]

- name: Kubeadm upgrade first control plane node
  # "kubeadm upgrade apply" runs exactly once per cluster, on the first
  # control-plane node; every other node uses "kubeadm upgrade node".
  ansible.builtin.command: "kubeadm upgrade apply v{{ upgrade_version }} -y"
  register: kubeadm_upgrade_first_cp
  when: ansible_hostname == groups.control[0]

- name: Show kubeadm upgrade first control plane node
  ansible.builtin.debug:
    msg: "{{ kubeadm_upgrade_first_cp.stdout }}"
  when: ansible_hostname == groups.control[0]
- name: Create .kube directory
  # BUG FIX: the file module does not expand shell variables, so
  # "path: $HOME/.kube" created a literal "$HOME" directory. Use the
  # ubuntu user's home directory explicitly (become_user is ubuntu).
  # File modes must be quoted strings — unquoted 0755 is parsed as
  # the integer 493 by YAML.
  become_user: ubuntu
  ansible.builtin.file:
    path: /home/ubuntu/.kube
    state: directory
    mode: "0755"
  register: kube_path
  when: ansible_hostname == groups.control[0]

- name: Copy admin.conf to .kube dir
  ansible.builtin.copy:
    src: /etc/kubernetes/admin.conf
    dest: "{{ kube_path.path }}/config"
    remote_src: true
    owner: ubuntu
  when: ansible_hostname == groups.control[0]

- name: Copy admin.conf to local
  # Pull the upgraded kubeconfig down to the Ansible controller so the
  # drain/uncordon tasks (delegated to localhost) can reach the API.
  # NOTE(review): run_once executes on the first host of the batch; if
  # that host is not groups.control[0] the task is skipped — confirm
  # inventory ordering.
  ansible.builtin.fetch:
    src: /etc/kubernetes/admin.conf
    dest: "~/.kube/config"
    flat: true
  run_once: true
  when: ansible_hostname == groups.control[0]
- name: Kubeadm upgrade node
  # Every node except the first control-plane node is upgraded with
  # "kubeadm upgrade node" (covers secondary control planes and workers).
  ansible.builtin.command: kubeadm upgrade node
  register: kubeadm_upgrade
  when: ansible_hostname != groups.control[0]

- name: Show kubeadm upgrade node
  ansible.builtin.debug:
    msg: "{{ kubeadm_upgrade.stdout }}"
  when: ansible_hostname != groups.control[0]

- name: Drain node
  # Evict workloads before the kubelet restart below. Runs from the
  # controller, which talks to the API server via the fetched
  # ~/.kube/config.
  delegate_to: localhost
  become: false
  kubernetes.core.k8s_drain:
    state: drain
    name: "{{ inventory_hostname }}"
    delete_options:
      ignore_daemonsets: true
      # force: true  # uncomment to evict pods not managed by a controller
      delete_emptydir_data: true
- name: Unhold kubelet & kubectl
  # dpkg_selections is idempotent, unlike "shell: apt-mark unhold"
  # which always reported "changed".
  ansible.builtin.dpkg_selections:
    name: "{{ item }}"
    selection: install
  loop:
    - kubelet
    - kubectl

- name: Install kubelet & kubectl
  # NOTE(review): "-00" matches the legacy apt.kubernetes.io repo;
  # pkgs.k8s.io uses "-1.1" — confirm the configured repository.
  ansible.builtin.apt:
    name:
      - "kubelet={{ upgrade_version }}-00"
      - "kubectl={{ upgrade_version }}-00"
    update_cache: true

- name: Hold kubelet & kubectl
  # Re-pin both packages so routine apt upgrades cannot move them.
  ansible.builtin.dpkg_selections:
    name: "{{ item }}"
    selection: hold
  loop:
    - kubelet
    - kubectl

- name: Kubelet version
  # Read-only query; never changes state.
  ansible.builtin.command: kubelet --version
  register: kubelet_version
  changed_when: false

- name: Show kubelet version
  ansible.builtin.debug:
    msg: "{{ kubelet_version.stdout }}"
- name: Pause
  # Brief settle time on the first control-plane node before querying
  # the API server's version.
  ansible.builtin.pause:
    seconds: 15
  when: ansible_hostname == groups.control[0]

- name: Kubectl server version
  # Read-only query against the API server; suppress "changed".
  become: false
  ansible.builtin.command: kubectl version -o yaml
  register: kubectl_server_version
  changed_when: false
  when: ansible_hostname == groups.control[0]

- name: Show kubectl server version
  ansible.builtin.debug:
    msg: "{{ kubectl_server_version.stdout }}"
  when: ansible_hostname == groups.control[0]

- name: Kubectl client version
  # Client-only query works on every node (no API server needed).
  become: false
  ansible.builtin.command: kubectl version --client -o yaml
  register: kubectl_client_version
  changed_when: false

- name: Show kubectl client version
  ansible.builtin.debug:
    msg: "{{ kubectl_client_version.stdout }}"
- name: Daemon-reload
  # Pick up any systemd unit changes shipped with the new kubelet package.
  ansible.builtin.systemd:
    daemon_reload: true

- name: Restart kubelet
  ansible.builtin.service:
    name: kubelet
    state: restarted

- name: Uncordon node
  # Mark the node schedulable again now that the upgraded kubelet is
  # running; executed from the controller like the earlier drain.
  delegate_to: localhost
  become: false
  kubernetes.core.k8s_drain:
    state: uncordon
    name: "{{ inventory_hostname }}"

- name: Remove useless packages from the cache
  ansible.builtin.apt:
    autoclean: true

- name: Remove dependencies that are no longer required
  ansible.builtin.apt:
    autoremove: true