Gather just the necessary facts (#5955)

* Gather just the necessary facts

* Move fact gathering to separate playbook.
This commit is contained in:
Lovro Seder 2020-04-18 01:23:36 +02:00 committed by GitHub
parent 7930f6fa0a
commit 27a268df33
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
7 changed files with 63 additions and 4 deletions

View file

@ -40,7 +40,11 @@
- { role: kubespray-defaults }
- { role: bootstrap-os, tags: bootstrap-os}
- name: Gather facts
import_playbook: facts.yml
- hosts: k8s-cluster:etcd
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
@ -50,6 +54,7 @@
environment: "{{ proxy_env }}"
- hosts: etcd
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
@ -61,6 +66,7 @@
when: not etcd_kubeadm_enabled| default(false)
- hosts: k8s-cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
@ -72,6 +78,7 @@
when: not etcd_kubeadm_enabled| default(false)
- hosts: k8s-cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
@ -79,6 +86,7 @@
environment: "{{ proxy_env }}"
- hosts: kube-master
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
@ -87,6 +95,7 @@
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
- hosts: k8s-cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
@ -95,12 +104,14 @@
- { role: kubernetes/node-label, tags: node-label }
- hosts: calico-rr
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
- hosts: kube-master[0]
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
@ -108,6 +119,7 @@
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
- hosts: kube-master
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
@ -118,6 +130,7 @@
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
- hosts: kube-master
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
@ -125,6 +138,7 @@
environment: "{{ proxy_env }}"
- hosts: k8s-cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }

View file

@ -12,6 +12,8 @@ This should be the easiest.
You can use `--limit=node1` to limit Kubespray to avoid disturbing other nodes in the cluster.
Before using `--limit`, run the `facts.yml` playbook without the limit to refresh the facts cache for all nodes.
### 3) Drain the node that will be removed
```sh

19
facts.yml Normal file
View file

@ -0,0 +1,19 @@
---
# Shared fact-gathering play, imported by cluster.yml, scale.yml,
# remove-node.yml and reset.yml via `import_playbook: facts.yml`.
# Instead of Ansible's full default fact collection, it gathers only
# the minimal subset plus the handful of facts Kubespray actually uses,
# which speeds up runs and keeps the facts cache small.
- name: Gather facts
  hosts: k8s-cluster:etcd:calico-rr
  # Facts are collected explicitly by the tasks below, so skip the
  # implicit play-level gathering.
  gather_facts: False
  tasks:
    - name: Gather minimal facts
      # '!all' limits setup to the minimal fact subset.
      setup:
        gather_subset: '!all'
    - name: Gather necessary facts
      # Collect only the network/hardware facts listed in the loop;
      # 'filter' restricts which fact is stored on each iteration.
      setup:
        gather_subset: '!all,!min,network,hardware'
        filter: "{{ item }}"
      loop:
        - ansible_distribution_major_version
        - ansible_default_ipv4
        - ansible_all_ipv4_addresses
        - ansible_memtotal_mb
        - ansible_swaptotal_mb

View file

@ -33,15 +33,18 @@
- { role: kubespray-defaults }
- { role: remove-node/pre-remove, tags: pre-remove }
- name: Gather facts
import_playbook: facts.yml
- hosts: "{{ node | default('kube-node') }}"
gather_facts: yes
gather_facts: no
roles:
- { role: kubespray-defaults }
- { role: reset, tags: reset, when: reset_nodes|default(True) }
# Currently cannot remove first master or etcd
- hosts: "{{ node | default('kube-master[1:]:etcd[:1]') }}"
gather_facts: yes
gather_facts: no
roles:
- { role: kubespray-defaults }
- { role: remove-node/post-remove, tags: post-remove }

View file

@ -19,10 +19,11 @@
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- hosts: all
gather_facts: true
- name: Gather facts
import_playbook: facts.yml
- hosts: etcd:k8s-cluster:calico-rr
gather_facts: False
vars_prompt:
name: "reset_confirmation"
prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."

View file

@ -41,8 +41,12 @@
- { role: kubespray-defaults }
- { role: bootstrap-os, tags: bootstrap-os}
- name: Gather facts
import_playbook: facts.yml
- name: Generate the etcd certificates beforehand
hosts: etcd
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
@ -58,6 +62,7 @@
- name: Target only workers to get kubelet installed and checking in on any new nodes
hosts: kube-node
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }

View file

@ -44,8 +44,12 @@
- { role: kubespray-defaults }
- { role: bootstrap-os, tags: bootstrap-os}
- name: Gather facts
import_playbook: facts.yml
- name: Download images to ansible host cache via first kube-master node
hosts: kube-master[0]
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost"}
@ -55,6 +59,7 @@
- name: Prepare nodes for upgrade
hosts: k8s-cluster:etcd:calico-rr
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
@ -64,6 +69,7 @@
- name: Upgrade container engine on non-cluster nodes
hosts: etcd:calico-rr:!k8s-cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: "{{ serial | default('20%') }}"
roles:
@ -72,6 +78,7 @@
environment: "{{ proxy_env }}"
- hosts: etcd
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
@ -83,6 +90,7 @@
when: not etcd_kubeadm_enabled | default(false)
- hosts: k8s-cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
@ -94,6 +102,7 @@
when: not etcd_kubeadm_enabled | default(false)
- name: Handle upgrades to master components first to maintain backwards compat.
gather_facts: False
hosts: kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: 1
@ -112,6 +121,7 @@
- name: Upgrade calico and external cloud provider on all masters and nodes
hosts: kube-master:kube-node
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: "{{ serial | default('20%') }}"
roles:
@ -123,6 +133,7 @@
- name: Finally handle worker upgrades, based on given batch size
hosts: kube-node:!kube-master
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: "{{ serial | default('20%') }}"
roles:
@ -136,6 +147,7 @@
environment: "{{ proxy_env }}"
- hosts: kube-master[0]
gather_facts: False
any_errors_fatal: true
roles:
- { role: kubespray-defaults }
@ -143,6 +155,7 @@
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
- hosts: calico-rr
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
@ -150,6 +163,7 @@
environment: "{{ proxy_env }}"
- hosts: kube-master
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
@ -157,6 +171,7 @@
environment: "{{ proxy_env }}"
- hosts: k8s-cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }