add tear down playbook
This commit is contained in:
parent
c39835628d
commit
f703814561
4 changed files with 113 additions and 0 deletions
|
@ -9,3 +9,8 @@ Copy the inventory.yml.sample over to inventory/sample/k8s_heketi_inventory.yml
|
|||
```
|
||||
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
|
||||
```
|
||||
|
||||
## Tear down
|
||||
```
|
||||
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
|
||||
```
|
||||
|
|
4
contrib/network-storage/heketi/heketi-tear-down.yml
Normal file
4
contrib/network-storage/heketi/heketi-tear-down.yml
Normal file
|
@ -0,0 +1,4 @@
|
|||
---
# Entry-point playbook: runs the tear-down role against the control
# machine, which in turn removes heketi/glusterfs from the cluster.
- hosts: localhost
  roles:
    - role: tear-down
|
|
@ -0,0 +1,51 @@
|
|||
---
# Clean the LVM footprint that gluster/heketi left on one node's disk.
# Expects `node` (loop var supplied by the caller) and
# `hostvars[node]['disk_volume_device_1']` (the device to wipe) to be set.
# lvm2 is installed first so pvs/vgremove/pvremove are available, and is
# removed again once the disk has been cleaned.

- name: "Install lvm utils (RedHat)"
  delegate_to: "{{ node }}"
  become: true
  yum:
    name: lvm2
    state: present
  when: ansible_os_family == 'RedHat'

- name: "Install lvm utils (Debian)"
  delegate_to: "{{ node }}"
  become: true
  apt:
    name: lvm2
    state: present
  when: ansible_os_family == 'Debian'

- name: "Get volume group information."
  delegate_to: "{{ node }}"
  become: true
  # shell (not command) because of the pipe; tail -n+2 drops the header row.
  shell: "pvs {{ disk }} --option vg_name | tail -n+2"
  vars:
    disk: "{{ hostvars[node]['disk_volume_device_1'] }}"
  register: volume_groups
  # pvs fails when the disk carries no physical volume — nothing to clean.
  ignore_errors: true
  changed_when: false

- name: "Remove volume groups."
  delegate_to: "{{ node }}"
  become: true
  command: "vgremove {{ volume_group }} --yes"
  with_items: "{{ volume_groups.stdout_lines }}"
  loop_control:
    loop_var: volume_group

- name: "Remove physical volume from cluster disks."
  delegate_to: "{{ node }}"
  become: true
  command: "pvremove {{ disk }} --yes"
  vars:
    disk: "{{ hostvars[node]['disk_volume_device_1'] }}"
  ignore_errors: true

- name: "Remove lvm utils (RedHat)"
  delegate_to: "{{ node }}"
  become: true
  yum:
    name: lvm2
    state: absent
  when: ansible_os_family == 'RedHat'

- name: "Remove lvm utils (Debian)"
  delegate_to: "{{ node }}"
  become: true
  apt:
    name: lvm2
    state: absent
  when: ansible_os_family == 'Debian'
|
|
@ -0,0 +1,53 @@
|
|||
---
# Tear down heketi and glusterfs from the Kubernetes cluster, then scrub
# the LVM state from every heketi node (disks.yml).
# kubectl deletions use ignore_errors so a partially deployed (or already
# torn-down) cluster does not abort the play.

- name: "Tear down heketi (pod resources)."
  command: "kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
  ignore_errors: true

- name: "Tear down heketi (deployment resources)."
  command: "kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
  ignore_errors: true

- name: "Tear down bootstrap."
  include_tasks: "../provision/tasks/setup/tear-down-bootstrap.yml"

- name: "Ensure there is nothing left over (pod resources)."
  command: "kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
  register: "heketi_result"
  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
  retries: 60
  delay: 5

- name: "Ensure there is nothing left over (deployment resources)."
  command: "kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
  register: "heketi_result"
  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
  retries: 60
  delay: 5

- name: "Tear down glusterfs."
  command: "kubectl delete daemonset.extensions/glusterfs"
  ignore_errors: true

- name: "Remove heketi storage service."
  command: "kubectl delete service heketi-storage-endpoints"
  ignore_errors: true

- name: "Remove heketi gluster role binding"
  command: "kubectl delete clusterrolebinding heketi-gluster-admin"
  ignore_errors: true

- name: "Remove heketi config secret"
  command: "kubectl delete secret heketi-config-secret"
  ignore_errors: true

- name: "Remove heketi db backup"
  command: "kubectl delete secret heketi-db-backup"
  ignore_errors: true

- name: "Remove heketi service account"
  command: "kubectl delete serviceaccount heketi-service-account"
  ignore_errors: true

- name: "Get secrets"
  command: "kubectl get secrets --output=\"json\""
  register: "secrets"
  changed_when: false

- name: "Remove heketi storage secret"
  vars:
    storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]"
    # json_query yields None when nothing matches; default('' , true)
    # collapses that to an empty string so the guard below is safe.
    storage_secret: "{{ secrets.stdout | from_json | json_query(storage_query) | default('', true) }}"
  command: "kubectl delete secret {{ storage_secret }}"
  # Guard on the query RESULT. The previous guard (`storage_query is
  # defined`) was always true because storage_query is set in vars above,
  # so the delete ran with an empty secret name when no secret matched.
  when: "storage_secret | length > 0"
  ignore_errors: true

- name: "Clean up cluster disks."
  include_tasks: "disks.yml"
  with_items: "{{ groups['heketi-node'] }}"
  loop_control:
    loop_var: "node"
|
Loading…
Reference in a new issue