Fix scaling (#5889)
* etcd: etcd-events doesn't depend on etcd_cluster_setup

* etcd: remove condition already present on include_tasks

* etcd: fix scaling up

* etcd: use *_access_addresses, do not delegate to etcd[0]

  We want to wait for the full cluster to be healthy, so use all the cluster
  addresses. We should also be able to run the playbook when etcd[0] is down
  (not tested), so do not delegate to etcd[0].

* etcd: use failed_when for health check

  An unhealthy cluster is expected on the first run, so use failed_when
  instead of ignore_errors to remove scary red messages. Also use run_once.

* kubernetes/preinstall: ensure ansible_fqdn is up to date after changing /etc/hosts

* kubernetes/master: regenerate apiserver cert if needed

Signed-off-by: Etienne Champetier <champetier.etienne@gmail.com>
parent 910a821d0b
commit a35b6dc1af

5 changed files with 60 additions and 19 deletions
etcd role tasks:

@@ -2,9 +2,10 @@
 - name: Configure | Check if etcd cluster is healthy
   shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
   register: etcd_cluster_is_healthy
-  ignore_errors: true
+  failed_when: false
   changed_when: false
   check_mode: no
+  run_once: yes
   when: is_etcd_master and etcd_cluster_setup
   tags:
     - facts
@@ -16,9 +17,10 @@
 - name: Configure | Check if etcd-events cluster is healthy
   shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
   register: etcd_events_cluster_is_healthy
-  ignore_errors: true
+  failed_when: false
   changed_when: false
   check_mode: no
+  run_once: yes
   when: is_etcd_master and etcd_events_cluster_setup
   tags:
     - facts
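Both health probes move from ignore_errors to failed_when: false. Either way the play continues, but ignore_errors still paints the task red with an "...ignoring" suffix, while failed_when: false reports plain ok and simply leaves the exit code in the registered result. A minimal standalone sketch of the pattern, with /bin/false standing in for the etcdctl probe (which legitimately fails on a first deploy):

- name: Probe that is allowed to fail on first run
  command: /bin/false    # stand-in for the etcdctl health check
  register: probe
  failed_when: false     # reported as ok; probe.rc keeps the real exit code
  changed_when: false
  run_once: true         # run on one host; Ansible broadcasts the registered
                         # result to every host in the play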
@@ -49,22 +51,26 @@
     daemon_reload: true
   when: is_etcd_master

+# when scaling, the new etcd will fail to start
 - name: Configure | Ensure etcd is running
   service:
     name: etcd
     state: started
     enabled: yes
+  ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}"
   when: is_etcd_master and etcd_cluster_setup

+# when scaling, the new etcd will fail to start
 - name: Configure | Ensure etcd-events is running
   service:
     name: etcd-events
     state: started
     enabled: yes
+  ignore_errors: "{{ etcd_events_cluster_is_healthy.rc == 0 }}"
   when: is_etcd_master and etcd_events_cluster_setup

-- name: Configure | Check if etcd cluster is healthy
-  shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_client_url }} cluster-health | grep -q 'cluster is healthy'"
+- name: Configure | Wait for etcd cluster to be healthy
+  shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
   register: etcd_cluster_is_healthy
   until: etcd_cluster_is_healthy.rc == 0
   retries: "{{ etcd_retries }}"
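The interesting new bit is the Jinja-valued ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}": a start failure is tolerated only when the cluster was already healthy before this play, i.e. when we are scaling and the new member cannot start until it has been added; on a first deploy (probe rc != 0) a start failure stays fatal. The rename from "Check if ... healthy" to "Wait for ... healthy" matches the added until/retries loop. A hedged sketch of the two halves, with placeholder names (mysvc, health_cmd):

- name: Start service, tolerating failure only while scaling up
  service:
    name: mysvc                          # placeholder unit name
    state: started
  ignore_errors: "{{ probe.rc == 0 }}"   # probe registered by the earlier health check

- name: Wait for the cluster to become healthy
  command: "{{ health_cmd }}"            # placeholder for the etcdctl pipeline
  register: health
  until: health.rc == 0
  retries: 4                             # etcd_retries in the real role
  delay: 5
  changed_when: false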
@@ -72,7 +78,6 @@
   ignore_errors: false
   changed_when: false
   check_mode: no
-  delegate_to: "{{ groups['etcd'][0] }}"
   run_once: yes
   when:
     - is_etcd_master
@@ -84,8 +89,8 @@
     ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CA_FILE: "{{ etcd_cert_dir }}/ca.pem"

-- name: Configure | Check if etcd-events cluster is healthy
-  shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_client_url }} cluster-health | grep -q 'cluster is healthy'"
+- name: Configure | Wait for etcd-events cluster to be healthy
+  shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
   register: etcd_events_cluster_is_healthy
   until: etcd_events_cluster_is_healthy.rc == 0
   retries: "{{ etcd_retries }}"
@@ -93,12 +98,10 @@
   ignore_errors: false
   changed_when: false
   check_mode: no
-  delegate_to: "{{ groups['etcd'][0] }}"
   run_once: yes
   when:
     - is_etcd_master
     - etcd_events_cluster_setup
-    - etcd_cluster_setup
   tags:
     - facts
   environment:
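Dropping delegate_to: groups['etcd'][0] removes the single point of failure the commit message calls out: with run_once the wait executes on whichever etcd host comes first in the current play, not necessarily etcd[0], and because the endpoint list is the full *_access_addresses set, etcdctl v2 only prints "cluster is healthy" (rather than "cluster is degraded") once every member responds. A sketch, assuming all_endpoints is a comma-separated list of every member's client URL:

- name: Wait for the whole cluster, from any available host
  shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ all_endpoints }} cluster-health | grep -q 'cluster is healthy'"
  register: health
  until: health.rc == 0
  retries: 4              # etcd_retries in the real role
  run_once: true          # no delegate_to, so losing etcd[0] doesn't break the wait
  changed_when: false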
@@ -136,14 +139,10 @@

 - name: Configure | Join member(s) to etcd cluster one at a time
   include_tasks: join_etcd_member.yml
   vars:
     target_node: "{{ item }}"
   with_items: "{{ groups['etcd'] }}"
   when: inventory_hostname == item and etcd_cluster_setup and etcd_member_in_cluster.rc != 0 and etcd_cluster_is_healthy.rc == 0

 - name: Configure | Join member(s) to etcd-events cluster one at a time
   include_tasks: join_etcd-events_member.yml
   vars:
     target_node: "{{ item }}"
   with_items: "{{ groups['etcd'] }}"
   when: inventory_hostname == item and etcd_events_cluster_setup and etcd_events_member_in_cluster.rc != 0 and etcd_events_cluster_is_healthy.rc == 0
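The join tasks keep their one-at-a-time shape: every host loops over the whole etcd group but acts only on its own iteration (inventory_hostname == item), so under the default linear strategy the joins happen serially in group order, which is what etcd needs when adding not-yet-started members. Stripped to its skeleton (join_one.yml is a hypothetical include):

- name: Join members one at a time
  include_tasks: join_one.yml
  vars:
    target_node: "{{ item }}"
  with_items: "{{ groups['etcd'] }}"
  # each host only acts when the loop reaches its own name, so iteration
  # order serializes the joins across the group
  when: inventory_hostname == item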
join_etcd-events_member.yml:
@@ -5,7 +5,6 @@
   until: member_add_result.rc == 0
   retries: "{{ etcd_retries }}"
   delay: "{{ retry_stagger | random + 3 }}"
-  when: target_node == inventory_hostname
   environment:
     ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
@@ -21,7 +20,6 @@
       {{ etcd_member_name }}={{ etcd_events_peer_url }}
       {%- endif -%}
       {%- endfor -%}
-  when: target_node == inventory_hostname

 - name: Join Member | Ensure member is in etcd-events cluster
   shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} member list | grep -q {{ etcd_events_access_address }}"
@@ -30,7 +28,12 @@
   check_mode: no
   tags:
     - facts
-  when: target_node == inventory_hostname
   environment:
     ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+
+- name: Configure | Ensure etcd-events is running
+  service:
+    name: etcd-events
+    state: started
+    enabled: yes
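The deleted when: target_node == inventory_hostname lines were redundant rather than wrong: configure.yml already guards the include with inventory_hostname == item and sets target_node to that same item, so the inner condition could never be false. Schematically (hypothetical file names):

# caller.yml -- the guard lives here, once
- include_tasks: member.yml
  vars:
    target_node: "{{ item }}"
  with_items: "{{ groups['etcd'] }}"
  when: inventory_hostname == item   # already implies target_node == inventory_hostname

# member.yml -- no need to re-check inside
- name: Per-member work
  debug:
    msg: "adding {{ target_node }}"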
join_etcd_member.yml:
@@ -5,7 +5,6 @@
   until: member_add_result.rc == 0
   retries: "{{ etcd_retries }}"
   delay: "{{ retry_stagger | random + 3 }}"
-  when: target_node == inventory_hostname
   environment:
     ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
@@ -22,7 +21,6 @@
       {{ etcd_member_name }}={{ etcd_peer_url }}
       {%- endif -%}
      {%- endfor -%}
-  when: target_node == inventory_hostname

 - name: Join Member | Ensure member is in etcd cluster
   shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
@@ -31,8 +29,13 @@
   check_mode: no
   tags:
     - facts
-  when: target_node == inventory_hostname
   environment:
     ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CA_FILE: "{{ etcd_cert_dir }}/ca.pem"
+
+- name: Configure | Ensure etcd is running
+  service:
+    name: etcd
+    state: started
+    enabled: yes
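Both join files also gain an "Ensure ... is running" tail, so the member that was just added gets its service started before the loop moves on to the next host. Combined with the member-list grep above it, the per-member flow is roughly: add member, render the updated peer list, verify membership, start the unit. A hedged outline in the same etcdctl v2 idiom (endpoints and member_url are placeholders):

- name: Verify the new member is in the member list
  shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ endpoints }} member list | grep -q {{ member_url }}"
  changed_when: false

- name: Start the freshly joined member before the next join begins
  service:
    name: etcd
    state: started
    enabled: yes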
kubernetes/master tasks:
@@ -103,6 +103,37 @@
     - not upgrade_cluster_setup
     - kubeadm_already_run.stat.exists

+- name: kubeadm | Check if apiserver.crt contains all needed SANs
+  command: openssl x509 -noout -in "{{ kube_cert_dir }}/apiserver.crt" -checkip "{{ item }}"
+  with_items: "{{ apiserver_sans }}"
+  register: apiserver_sans_check
+  changed_when: "'does match certificate' not in apiserver_sans_check.stdout"
+  when:
+    - inventory_hostname == groups['kube-master']|first
+    - kubeadm_already_run.stat.exists
+
+- name: kubeadm | regenerate apiserver cert 1/2
+  file:
+    state: absent
+    path: "{{ kube_cert_dir }}/{{ item }}"
+  with_items:
+    - apiserver.crt
+    - apiserver.key
+  when:
+    - inventory_hostname == groups['kube-master']|first
+    - kubeadm_already_run.stat.exists
+    - apiserver_sans_check.changed
+
+- name: kubeadm | regenerate apiserver cert 2/2
+  command: >-
+    {{ bin_dir }}/kubeadm
+    init phase certs apiserver
+    --config={{ kube_config_dir }}/kubeadm-config.yaml
+  when:
+    - inventory_hostname == groups['kube-master']|first
+    - kubeadm_already_run.stat.exists
+    - apiserver_sans_check.changed
+
 - name: kubeadm | Initialize first master
   command: >-
     timeout -k 300s 300s
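The SAN check leans on openssl x509 -checkip (OpenSSL 1.0.2+), which prints "<ip> does match certificate" or "<ip> does NOT match certificate" and, in the versions I've seen, exits 0 either way, so the outcome is surfaced through changed_when rather than the return code; apiserver_sans_check.changed then gates the delete-and-regenerate pair. A single-address sketch (the path and address are illustrative):

- name: Check one candidate SAN against the current apiserver cert
  command: openssl x509 -noout -in /etc/kubernetes/ssl/apiserver.crt -checkip 10.233.0.1
  register: san_check
  changed_when: "'does match certificate' not in san_check.stdout"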
kubernetes/preinstall tasks:
@@ -59,3 +59,8 @@
     backup: yes
     unsafe_writes: yes
   with_dict: "{{ etc_hosts_localhosts_dict_target }}"
+
+# gather facts to update ansible_fqdn
+- name: Update facts
+  setup:
+    gather_subset: min
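setup with gather_subset: min re-collects only the minimal fact set, which includes ansible_fqdn, so later plays (including, presumably, the apiserver SAN handling above) see the /etc/hosts edits made just before instead of the values cached at initial fact gathering. A quick hedged check that the refresh took:

- name: Update facts
  setup:
    gather_subset: min

- name: Show the fqdn Ansible now sees
  debug:
    var: ansible_fqdn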