# c12s-kubespray/roles/upgrade/pre-upgrade/tasks/main.yml

---
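# Optional operator gate before touching each node (summarizing the two tasks
# below): prompt for confirmation when upgrade_node_confirm is true, otherwise
# pause for upgrade_node_pause_seconds when it is non-zero.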
# Wait for upgrade
- name: Confirm node upgrade
  pause:
    echo: yes
    prompt: "Ready to upgrade node? (Press Enter to continue or Ctrl+C for other options)"
  when:
    - upgrade_node_confirm

- name: Wait before upgrade node
  pause:
    seconds: "{{ upgrade_node_pause_seconds }}"
  when:
    - not upgrade_node_confirm
    - upgrade_node_pause_seconds != 0
# Node Ready: type = Ready, status = True
# Node NotReady: type = Ready, status = Unknown
- name: See if node is in ready state
  command: >
    {{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }}
    -o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }'
  register: kubectl_node_ready
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  failed_when: false
  changed_when: false
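# Example (hypothetical node name "node1"):
#   kubectl get node node1 -o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }'
#   -> "True" on a healthy node, "Unknown" on an unreachable one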
# SchedulingDisabled: unschedulable = true
# otherwise the unschedulable key doesn't exist
- name: See if node is schedulable
  command: >
    {{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }}
    -o jsonpath='{ .spec.unschedulable }'
  register: kubectl_node_schedulable
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  failed_when: false
  changed_when: false
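# Example (hypothetical node name "node1"):
#   kubectl get node node1 -o jsonpath='{ .spec.unschedulable }'
#   -> "true" on a cordoned node, empty output when the node is schedulable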
- name: Set if node needs cordoning
  set_fact:
    needs_cordoning: >-
      {% if (kubectl_node_ready.stdout == "True" and not kubectl_node_schedulable.stdout) or upgrade_node_always_cordon -%}
      true
      {%- else -%}
      false
      {%- endif %}
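# Note: the Jinja template above renders needs_cordoning as the string "true"
# or "false"; Ansible's `when` evaluation coerces such boolean-like strings, so
# the draining block below only runs when cordoning is actually needed.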
- name: Node draining
  block:
    - name: Cordon node
      command: "{{ kubectl }} cordon {{ kube_override_hostname|default(inventory_hostname) }}"
      delegate_to: "{{ groups['kube_control_plane'][0] }}"
      changed_when: true
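    # `kubectl drain --pod-selector` requires kubectl >= v1.10.0 (enforced by
    # the assert below), so when a pod selector is configured, check the client
    # version before attempting the drain.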
    - name: Check kubectl version
      command: "{{ kubectl }} version --client --short"
      register: kubectl_version
      delegate_to: "{{ groups['kube_control_plane'][0] }}"
      run_once: yes
      changed_when: false
      when:
        - drain_nodes
        - drain_pod_selector
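    # `kubectl version --client --short` prints e.g. "Client Version: v1.24.0";
    # the last space-separated field is the version tag compared below.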
    - name: Ensure minimum version for drain label selector if necessary
      assert:
        that: "kubectl_version.stdout.split(' ')[-1] is version('v1.10.0', '>=')"
      when:
        - drain_nodes
        - drain_pod_selector
    - name: Drain node
      command: >-
        {{ kubectl }} drain
        --force
        --ignore-daemonsets
        --grace-period {{ hostvars['localhost']['drain_grace_period_after_failure'] | default(drain_grace_period) }}
        --timeout {{ hostvars['localhost']['drain_timeout_after_failure'] | default(drain_timeout) }}
        --delete-emptydir-data {{ kube_override_hostname|default(inventory_hostname) }}
        {% if drain_pod_selector %}--pod-selector '{{ drain_pod_selector }}'{% endif %}
      when: drain_nodes
      register: result
      failed_when:
        - result.rc != 0
        - not drain_fallback_enabled
      until: result.rc == 0
      retries: "{{ drain_retries }}"
      delay: "{{ drain_retry_delay_seconds }}"
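    # When drain_fallback_enabled is set, failed_when above tolerates a failed
    # drain so the fallback below can retry it with the eviction API disabled.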
    - name: Drain fallback
      block:
        - name: Set facts after regular drain has failed
          set_fact:
            drain_grace_period_after_failure: "{{ drain_fallback_grace_period }}"
            drain_timeout_after_failure: "{{ drain_fallback_timeout }}"
          delegate_to: localhost
          delegate_facts: yes
          run_once: yes
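        # --disable-eviction deletes pods directly instead of going through the
        # eviction API, bypassing PodDisruptionBudget checks that can block a
        # regular drain.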
        - name: Drain node - fallback with disabled eviction
          command: >-
            {{ kubectl }} drain
            --force
            --ignore-daemonsets
            --grace-period {{ drain_fallback_grace_period }}
            --timeout {{ drain_fallback_timeout }}
            --delete-emptydir-data {{ kube_override_hostname|default(inventory_hostname) }}
            {% if drain_pod_selector %}--pod-selector '{{ drain_pod_selector }}'{% endif %}
            --disable-eviction
          register: drain_fallback_result
          until: drain_fallback_result.rc == 0
          retries: "{{ drain_fallback_retries }}"
          delay: "{{ drain_fallback_retry_delay_seconds }}"
          changed_when: drain_fallback_result.rc == 0
      when:
        - drain_nodes
        - drain_fallback_enabled
        - result.rc != 0
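  # If draining still failed after the fallback, the rescue handlers below run:
  # optionally uncordon the node again and, if configured, fail the play.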
  rescue:
    - name: Set node back to schedulable
      command: "{{ kubectl }} uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
      when: upgrade_node_uncordon_after_drain_failure

    - name: Fail after rescue
      fail:
        msg: "Failed to drain node {{ kube_override_hostname|default(inventory_hostname) }}"
      when: upgrade_node_fail_if_drain_fails
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  when:
    - needs_cordoning