# roles/network_plugin/contiv/tasks/pre-reset.yml
---
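# Pre-reset tasks for the Contiv network plugin: before the cluster reset
# proceeds, remove the Contiv DaemonSets and launch a temporary cleanup
# DaemonSet so Contiv state is cleared from every node while the API server
# is still reachable.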
- name: reset | Check that kubectl is still here
  stat:
    path: "{{ bin_dir }}/kubectl"
  register: contiv_kubectl

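# Delete the contiv-netplugin and contiv-netmaster DaemonSets. This runs only
# on the first master, and only when kubectl is still present on the node.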
- name: reset | Delete contiv netplugin and netmaster daemonsets
  kube:
    name: "{{ item }}"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "ds"
    state: absent
  with_items:
    - contiv-netplugin
    - contiv-netmaster
  register: contiv_cleanup_deletion
  tags:
    - network
  when:
    - contiv_kubectl.stat.exists
    - inventory_hostname == groups['kube-master'][0]

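# Ship the cleanup script to every node. The cleanup DaemonSet started below
# is presumably what invokes /opt/cni/bin/cleanup inside its pods (an
# assumption based on the destination path; the script itself lives in
# ../files/contiv-cleanup.sh).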
- name: reset | Copy contiv temporary cleanup script
  copy:
    src: ../files/contiv-cleanup.sh  # Not in role_path so we must trick...
    dest: /opt/cni/bin/cleanup
    owner: root
    group: root
    mode: 0750
  when:
    - contiv_kubectl.stat.exists

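# Render the contiv-cleanup DaemonSet manifest on the first master. It is
# written under kube_config_dir because contiv_config_dir is not necessarily
# defined during a reset run.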
- name: reset | Lay down contiv cleanup template
  template:
    src: ../templates/contiv-cleanup.yml.j2  # Not in role_path so we must trick...
    dest: "{{ kube_config_dir }}/contiv-cleanup.yml"  # kube_config_dir is used here as contiv_config_dir is not necessarily set at reset
  register: contiv_cleanup_manifest
  when:
    - contiv_kubectl.stat.exists
    - inventory_hostname == groups['kube-master'][0]

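# Apply the cleanup DaemonSet; "state: latest" in kubespray's kube module is
# intended to create or replace the resource from the rendered manifest.
# Errors are ignored so a failed cleanup does not abort the reset.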
- name: reset | Start contiv cleanup resources
  kube:
    name: "contiv-cleanup"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "ds"
    state: latest
    filename: "{{ kube_config_dir }}/contiv-cleanup.yml"
  when:
    - contiv_kubectl.stat.exists
    - inventory_hostname == groups['kube-master'][0]
  ignore_errors: true

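# Poll the cleanup DaemonSet until its ready count matches the size of the
# k8s-cluster group. Illustrative only: with a 3-node cluster the jsonpath
# query below should print "3" once every cleanup pod reports ready.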
- name: reset | Wait until contiv cleanup is done
  command: "{{ bin_dir }}/kubectl -n kube-system get ds contiv-cleanup -o jsonpath='{.status.numberReady}'"
  register: cleanup_done_all_nodes
  until: cleanup_done_all_nodes.stdout|int == groups['k8s-cluster']|length
  retries: 5
  delay: 5
  ignore_errors: true
  changed_when: false
  when:
    - contiv_kubectl.stat.exists
    - inventory_hostname == groups['kube-master'][0]