Purge legacy cleanup tasks older than 1 year (#4450)
We don't need to support upgrades from two-year-old installs, just from the last major version. Most retried tasks were also changed to a 1-second delay instead of longer ones.
parent d25ecfe1c1
commit fc072300ea
11 changed files with 23 additions and 170 deletions
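For reference, the retry pattern being tuned throughout this commit follows the standard Ansible retries/until loop. The sketch below is illustrative only (the task name is hypothetical; the command is borrowed from the Docker handler in this diff): a readiness check is re-run with a short fixed pause until it succeeds.

- name: Example | wait for a service to respond   # hypothetical task name, for illustration
  command: "{{ docker_bin_dir }}/docker images"   # any idempotent readiness check
  register: check_result
  retries: 20        # more attempts...
  delay: 1           # ...with a shorter 1-second pause between them
  until: check_result.rc == 0

With retries raised and the per-attempt delay dropped to 1 second, the overall wait budget stays roughly the same or grows, while tasks recover much sooner once the service is already up.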
@@ -5,7 +5,6 @@
     - Docker | reload systemd
     - Docker | reload docker.socket
     - Docker | reload docker
-    - Docker | pause while Docker restarts
     - Docker | wait for docker

 - name: Docker | reload systemd
@@ -23,14 +22,9 @@
     name: docker
     state: restarted

-- name: Docker | pause while Docker restarts
-  pause:
-    seconds: 10
-    prompt: "Waiting for docker restart"
-
 - name: Docker | wait for docker
   command: "{{ docker_bin_dir }}/docker images"
   register: docker_ready
-  retries: 10
-  delay: 5
+  retries: 20
+  delay: 1
   until: docker_ready.rc == 0
@@ -40,8 +40,8 @@
     client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
   register: result
   until: result.status is defined and result.status == 200
-  retries: 10
-  delay: 5
+  retries: 60
+  delay: 1

 - name: wait for etcd-events up
   uri:
@@ -51,8 +51,8 @@
     client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
   register: result
   until: result.status is defined and result.status == 200
-  retries: 10
-  delay: 5
+  retries: 60
+  delay: 1

 - name: set etcd_secret_changed
   set_fact:
@@ -1,31 +1,4 @@
 ---
-- name: Kubernetes Apps | Delete old CoreDNS resources
-  kube:
-    name: "coredns"
-    namespace: "kube-system"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item }}"
-    state: absent
-  with_items:
-    - 'deploy'
-    - 'configmap'
-    - 'svc'
-  tags:
-    - upgrade
-
-- name: Kubernetes Apps | Delete old nodelocalDNS resources
-  kube:
-    name: "nodelocaldns"
-    namespace: "kube-system"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item }}"
-    state: absent
-  with_items:
-    - 'deamonset'
-    - 'configmap'
-  tags:
-    - upgrade
-
 - name: Kubernetes Apps | Delete kubeadm CoreDNS
   kube:
     name: "coredns"
@@ -37,41 +10,3 @@
     - kubeadm_init is defined
     - kubeadm_init.changed|default(false)
     - inventory_hostname == groups['kube-master'][0]
-
-- name: Kubernetes Apps | Delete old KubeDNS resources
-  kube:
-    name: "kube-dns"
-    namespace: "kube-system"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item }}"
-    state: absent
-  with_items:
-    - 'deploy'
-    - 'svc'
-  tags:
-    - upgrade
-
-- name: Kubernetes Apps | Delete kubeadm KubeDNS
-  kube:
-    name: "kube-dns"
-    namespace: "kube-system"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item }}"
-    state: absent
-  with_items:
-    - 'deploy'
-    - 'svc'
-  when:
-    - kubeadm_init is defined
-    - kubeadm_init.changed|default(false)
-    - inventory_hostname == groups['kube-master'][0]
-
-- name: Kubernetes Apps | Delete old KubeDNS Autoscaler deployment
-  kube:
-    name: "kubedns-autoscaler"
-    namespace: "kube-system"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "deploy"
-    state: absent
-  tags:
-    - upgrade
@@ -1,15 +1,4 @@
 ---
-- name: Kubernetes Apps | Delete old kubernetes-dashboard resources
-  kube:
-    name: "kubernetes-dashboard"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item }}"
-    state: absent
-  with_items:
-    - 'ClusterRoleBinding'
-  tags:
-    - upgrade
-
 - name: Kubernetes Apps | Lay down dashboard template
   template:
     src: "{{ item.file }}.j2"
@@ -7,8 +7,8 @@
     client_key: "{{ kube_apiserver_client_key }}"
   register: result
   until: result.status == 200
-  retries: 10
-  delay: 2
+  retries: 20
+  delay: 1
   when: inventory_hostname == groups['kube-master'][0]

 - name: Kubernetes Apps | Cleanup DNS
@@ -51,15 +51,6 @@
   when:
     - inventory_hostname == groups['kube-master'][0]

-- name: Kubernetes Apps | Purge old Netchecker server
-  kube:
-    name: "netchecker-server"
-    namespace: "{{ netcheck_namespace }}"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "po"
-    state: absent
-  when: inventory_hostname == groups['kube-master'][0]
-
 - name: Kubernetes Apps | Start Netchecker Resources
   kube:
     name: "{{item.item.name}}"
@@ -1,23 +1,5 @@
 ---

-- name: NGINX Ingress Controller | Remove legacy addon dir and manifests
-  file:
-    path: "{{ kube_config_dir }}/addons/ingress_nginx"
-    state: absent
-  when:
-    - inventory_hostname == groups['kube-master'][0]
-  tags:
-    - upgrade
-
-- name: NGINX Ingress Controller | Remove legacy namespace
-  shell: |
-    {{ bin_dir }}/kubectl delete namespace {{ ingress_nginx_namespace }}
-  ignore_errors: yes
-  when:
-    - inventory_hostname == groups['kube-master'][0]
-  tags:
-    - upgrade
-
 - name: NGINX Ingress Controller | Create addon dir
   file:
     path: "{{ kube_config_dir }}/addons/ingress_nginx"
@@ -43,23 +43,23 @@
 - name: Master | Remove apiserver container
   shell: "docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f"
   register: remove_apiserver_container
-  retries: 4
+  retries: 10
   until: remove_apiserver_container.rc == 0
-  delay: 5
+  delay: 1

 - name: Master | Remove scheduler container
   shell: "docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty docker rm -f"
   register: remove_scheduler_container
-  retries: 4
+  retries: 10
   until: remove_scheduler_container.rc == 0
-  delay: 5
+  delay: 1

 - name: Master | Remove controller manager container
   shell: "docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty docker rm -f"
   register: remove_cm_container
-  retries: 4
+  retries: 10
   until: remove_cm_container.rc == 0
-  delay: 5
+  delay: 1

 - name: Master | wait for kube-scheduler
   uri:
@@ -67,15 +67,15 @@
   register: scheduler_result
   until: scheduler_result.status == 200
   retries: 60
-  delay: 5
+  delay: 1

 - name: Master | wait for kube-controller-manager
   uri:
     url: http://localhost:10252/healthz
   register: controller_manager_result
   until: controller_manager_result.status == 200
-  retries: 15
-  delay: 5
+  retries: 60
+  delay: 1

 - name: Master | wait for the apiserver to be running
   uri:
@@ -85,8 +85,8 @@
     client_key: "{{ kube_apiserver_client_key }}"
   register: result
   until: result.status == 200
-  retries: 30
-  delay: 10
+  retries: 60
+  delay: 1

 - name: Master | set secret_changed
   command: /bin/true
@@ -1,29 +1,12 @@
 ---
-- name: "Pre-upgrade | etcd3 upgrade | see if old config exists"
-  command: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} ls /registry/minions"
-  environment:
-    ETCDCTL_API: 2
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/{{ kube_etcd_key_file }}"
-  register: old_data_exists
-  delegate_to: "{{groups['etcd'][0]}}"
-  changed_when: false
-  when: kube_apiserver_storage_backend == "etcd3"
-  failed_when: false
-
-- name: "Pre-upgrade | etcd3 upgrade | use etcd2 unless forced to etcd3"
-  set_fact:
-    kube_apiserver_storage_backend: "etcd2"
-  when: old_data_exists.rc == 0 and not force_etcd3|bool
-
-- name: "Pre-upgrade | Delete master manifests"
+- name: "Pre-upgrade | Delete master manifests if etcd secrets changed"
   file:
     path: "/etc/kubernetes/manifests/{{item}}.manifest"
     state: absent
   with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   register: kube_apiserver_manifest_replaced
-  when: (secret_changed|default(false) or etcd_secret_changed|default(false))
+  when: etcd_secret_changed|default(false)

 - name: "Pre-upgrade | Delete master containers forcefully"
   shell: "docker ps -af name=k8s_{{item}}* -q | xargs --no-run-if-empty docker rm -f"
@@ -31,6 +14,6 @@
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   when: kube_apiserver_manifest_replaced.changed
   register: remove_master_container
-  retries: 4
+  retries: 10
   until: remove_master_container.rc == 0
-  delay: 5
+  delay: 1
@@ -1,6 +1,4 @@
 ---
-- import_tasks: pre-upgrade.yml
-
 - name: Flannel | Create Flannel manifests
   template:
     src: "{{item.file}}.j2"
@@ -1,19 +0,0 @@
----
-- name: Flannel pre-upgrade | Purge legacy flannel systemd unit file
-  file:
-    path: "/etc/systemd/system/docker.service.d/flannel-options.conf"
-    state: absent
-  notify:
-    - Flannel | delete default docker bridge
-
-- name: Flannel pre-upgrade | Purge legacy Flannel static pod manifest
-  file:
-    path: "{{ kube_manifest_dir }}/flannel-pod.manifest"
-    state: absent
-  notify:
-    - Flannel | delete flannel interface
-
-- name: Flannel pre-upgrade | Remove Flannel's certificate directory not required by CNI
-  file:
-    dest: "{{ flannel_cert_dir }}"
-    state: absent