diff --git a/roles/kubernetes-apps/rotate_tokens/tasks/rotate_tokens.yml b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
similarity index 68%
rename from roles/kubernetes-apps/rotate_tokens/tasks/rotate_tokens.yml
rename to roles/kubernetes-apps/rotate_tokens/tasks/main.yml
index 0800c78c7..bb17d9066 100644
--- a/roles/kubernetes-apps/rotate_tokens/tasks/rotate_tokens.yml
+++ b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
@@ -3,7 +3,6 @@
   command: >-
     {{ bin_dir }}/kubectl get pods --all-namespaces
     -o 'jsonpath={range .items[*]}{.metadata.namespace}{" "}{.metadata.name}{" "}{.spec.volumes[*].name}{"\n"}{end}'
-
   register: pods_secrets
   run_once: true

@@ -15,28 +14,12 @@
   register: tokens_to_delete
   run_once: true

-- name: view pods_secrets
-  debug: msg="{{ pods_secrets.stdout_lines }}"
-
-- name: view pods_secrets2
-  #debug: msg="{{ item.split(" ")[0] }}"
-  debug: msg="{{ item.split(" ")[0] }} {{ item.split(" ")[1] }}"
-  with_items: "{{ tokens_to_delete.stdout_lines }}"
-
 - name: Rotate Tokens | Delete expired tokens
   command: "{{ bin_dir }}/kubectl delete secrets -n {{ item.split(' ')[0] }} {{ item.split(' ')[1] }}"
   with_items: "{{ tokens_to_delete.stdout_lines }}"
   run_once: true

-- set_fact:
-    t2d: |-
-      ["default default-token-38nh5",
-      "kube-public default-token-cx54r",
-      "kube-system default-token-d6dfh",
-      "default default-token-b58hs"
-      ]
-
-- name: Rotate Tokens | Delete pods with default tokens
+- name: Rotate Tokens | Delete pods
   command: "{{ bin_dir }}/kubectl delete pod -n {{ item.split(' ')[0] }} {{ item.split(' ')[1] }}"
   with_items: "{{ pods_secrets.stdout_lines }}"
   register: delete_pods
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 3da0b6707..3eae97a4c 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -24,7 +24,7 @@
   register: kubeadm_client_conf

 - name: Join to cluster if needed
-  command: kubeadm join --config {{ kube_config_dir}}/kubeadm-client.conf --skip-preflight-checks
+  command: "{{ bin_dir }}/kubeadm join --config {{ kube_config_dir}}/kubeadm-client.conf --skip-preflight-checks"
   register: kubeadm_join
   when: not is_kube_master and (kubeadm_client_conf.changed or not kubelet_conf.stat.exists)

diff --git a/roles/kubernetes/master/tasks/kubeadm-setup.yml b/roles/kubernetes/master/tasks/kubeadm-setup.yml
index 0779a623d..67e84a509 100644
--- a/roles/kubernetes/master/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-setup.yml
@@ -61,20 +61,22 @@
   register: kubeadm_config

 - name: kubeadm | Initialize first master
-  command: timeout -k 240s 240s kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --skip-preflight-checks
+  command: timeout -k 240s 240s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --skip-preflight-checks
   register: kubeadm_init
   #Retry is because upload config sometimes fails
   retries: 3
   when: inventory_hostname == groups['kube-master']|first and not admin_conf.stat.exists
   failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
+  notify: Master | restart kubelet

 - name: kubeadm | Upgrade first master
-  command: timeout -k 240s 240s kubeadm upgrade apply --config={{ kube_config_dir }}/kubeadm-config.yaml {{ kube_version }} --skip-preflight-checks
+  command: timeout -k 240s 240s {{ bin_dir }}/kubeadm upgrade apply --config={{ kube_config_dir }}/kubeadm-config.yaml {{ kube_version }} --skip-preflight-checks
   register: kubeadm_upgrade
   #Retry is because upload config sometimes fails
   retries: 3
   when: inventory_hostname == groups['kube-master']|first and (kubeadm_config.changed and admin_conf.stat.exists)
   failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
+  notify: Master | restart kubelet

 - name: slurp kubeadm certs
   slurp:
@@ -109,16 +111,18 @@
   when: inventory_hostname != groups['kube-master']|first

 - name: kubeadm | Init other uninitialized masters
-  command: timeout -k 240s 240s kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --skip-preflight-checks
+  command: timeout -k 240s 240s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --skip-preflight-checks
   register: kubeadm_init
   when: inventory_hostname != groups['kube-master']|first and not admin_conf.stat.exists
   failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
+  notify: Master | restart kubelet

-- name: kubeadm | Upgrade first master
-  command: timeout -k 240s 240s kubeadm upgrade apply --config={{ kube_config_dir }}/kubeadm-config.yaml {{ kube_version }} --skip-preflight-checks
+- name: kubeadm | Upgrade other masters
+  command: timeout -k 240s 240s {{ bin_dir }}/kubeadm upgrade apply --config={{ kube_config_dir }}/kubeadm-config.yaml {{ kube_version }} --skip-preflight-checks
   register: kubeadm_upgrade
   when: inventory_hostname != groups['kube-master']|first and (kubeadm_config.changed and admin_conf.stat.exists)
   failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
+  notify: Master | restart kubelet

 - name: kubeadm | Check service account key again
   stat:
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index 7269dab35..9d0c44e25 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -16,7 +16,10 @@
   - name: Get pod names
     shell: "{{bin_dir}}/kubectl get pods -o json"
     register: pods
-    until: '"ContainerCreating" not in pods.stdout and "Terminating" not in pods.stdout'
+    until:
+      - '"ContainerCreating" not in pods.stdout'
+      - '"Pending" not in pods.stdout'
+      - '"Terminating" not in pods.stdout'
     retries: 60
     delay: 2
     no_log: true
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index a20123ec7..28261027f 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -67,7 +67,6 @@
     - { role: kubernetes/node, tags: node }
     - { role: kubernetes/master, tags: master }
     - { role: network_plugin, tags: network }
-    - { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
     - { role: upgrade/post-upgrade, tags: post-upgrade }

 #Finally handle worker upgrades, based on given batch size