Enable upgrade to kubeadm

parent 1067595b5c
commit 2d1106fbed

3 changed files with 50 additions and 4 deletions
4 roles/kubernetes/master/tasks/kubeadm_cleanup_old_certs.yml Normal file

@@ -0,0 +1,4 @@
+---
+
+- name: kubeadm | Purge old certs
+  shell: "rm -f {{ kube_cert_dir }}/*.pem"
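Note: the purge above depends on the shell expanding the *.pem glob. A module-based alternative is sketched below; it is not part of this commit and only assumes the same kube_cert_dir variable that the role already defines.

# Sketch (not in this commit): collect the old *.pem files with the find
# module, then remove them with the file module, avoiding shell globbing.
- name: kubeadm | Find old certs
  find:
    paths: "{{ kube_cert_dir }}"
    patterns: "*.pem"
  register: old_pem_files

- name: kubeadm | Purge old certs without a shell
  file:
    path: "{{ item.path }}"
    state: absent
  with_items: "{{ old_pem_files.files }}"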
12 roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml Normal file

@@ -0,0 +1,12 @@
+---
+- name: Copy old certs to the kubeadm expected path
+  copy:
+    src: "{{ kube_cert_dir }}/{{ item.src }}"
+    dest: "{{ kube_cert_dir }}/{{ item.dest }}"
+    remote_src: yes
+  with_items:
+    - {src: apiserver.pem, dest: apiserver.crt}
+    - {src: apiserver-key.pem, dest: apiserver.key}
+    - {src: ca.pem, dest: ca.crt}
+    - {src: ca-key.pem, dest: ca.key}
+  register: kubeadm_copy_old_certs
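After the copy, a quick sanity check could confirm that every file kubeadm expects is actually in place. This is a sketch only, not part of this commit, reusing the same kube_cert_dir variable:

# Sketch: verify the migrated files exist before kubeadm is invoked.
- name: kubeadm | Check migrated certs are present
  stat:
    path: "{{ kube_cert_dir }}/{{ item }}"
  register: migrated_certs
  with_items: ["apiserver.crt", "apiserver.key", "ca.crt", "ca.key"]

- name: kubeadm | Fail early if a migrated cert is missing
  fail:
    msg: "{{ item.item }} was not found in {{ kube_cert_dir }}"
  when: not item.stat.exists
  with_items: "{{ migrated_certs.results }}"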
@@ -1,4 +1,34 @@
 ---
+- name: kubeadm | Check if old apiserver cert exists on host
+  stat:
+    path: "{{ kube_cert_dir }}/apiserver.pem"
+  register: old_apiserver_cert
+  delegate_to: "{{groups['kube-master']|first}}"
+  run_once: true
+
+- name: kubeadm | Check if kubeadm has already run
+  stat:
+    path: "{{ kube_config_dir }}/admin.conf"
+  register: admin_conf
+
+- name: kubeadm | Migrate certificates to prepare for kubeadm
+  include: kubeadm-migrate-certs.yml
+  when:
+    - inventory_hostname == groups['kube-master']|first
+    - old_apiserver_cert.stat.exists
+
+- name: kubeadm | Delete old static pods
+  file:
+    path: "{{ kube_config_dir }}/manifests/{{item}}.manifest"
+    state: absent
+  with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
+  when: old_apiserver_cert.stat.exists
+
+- name: kubeadm | Forcefully delete old static pods
+  shell: "docker ps -f name=k8s-{{item}}* -q | xargs --no-run-if-empty docker rm -f"
+  with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
+  when: old_apiserver_cert.stat.exists
+
 - name: kubeadm | aggregate all SANs
   set_fact:
     apiserver_sans: >-
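The forced docker rm above fires once per component; if the kubelet is still tearing those static pods down, a short wait loop could make the handoff to kubeadm more deterministic. A sketch only, not part of this commit, reusing the same component list and the old_apiserver_cert fact:

# Sketch: poll until no k8s-<component> containers remain before kubeadm runs.
- name: kubeadm | Wait for old control plane containers to disappear
  shell: "docker ps -q -f name=k8s-{{ item }}"
  register: old_static_pod_containers
  until: old_static_pod_containers.stdout == ""
  retries: 10
  delay: 5
  with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
  when: old_apiserver_cert.stat.exists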
@@ -29,10 +59,6 @@
     dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
   register: kubeadm_config
 
-- name: Check if kubeadm has already run
-  stat:
-    path: "{{ kube_config_dir }}/admin.conf"
-  register: admin_conf
 
 - name: kubeadm | Initialize first master
@@ -80,3 +106,7 @@
   #Retry is because upload config sometimes fails
   retries: 3
   when: inventory_hostname != groups['kube-master']|first and (kubeadm_config.changed or not admin_conf.stat.exists or copy_kubeadm_certs.changed)
+
+- name: kubeadm | cleanup old certs if necessary
+  include: kubeadm_cleanup_old_certs.yml
+  when: old_apiserver_cert.stat.exists
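As an aside, the compound when: on the retried task above could equivalently be written in YAML list form, which Ansible ANDs together. A readability sketch only, not a change made in this commit:

  # Equivalent condition, expressed as a list (items are ANDed).
  when:
    - inventory_hostname != groups['kube-master']|first
    - kubeadm_config.changed or not admin_conf.stat.exists or copy_kubeadm_certs.changed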