diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
index 6176ba893..48732c3d6 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -150,8 +150,8 @@
 - name: Create hardcoded kubeadm token for joining nodes with 24h expiration (if defined)
   shell: >-
-    {{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token delete {{ kubeadm_token }} || :;
-    {{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token create {{ kubeadm_token }}
+    {{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token delete {{ kubeadm_token }} || :;
+    {{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create {{ kubeadm_token }}
   changed_when: false
   when:
     - inventory_hostname == groups['kube_control_plane']|first
@@ -161,7 +161,7 @@
     - kubeadm_token
 
 - name: Create kubeadm token for joining nodes with 24h expiration (default)
-  command: "{{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token create"
+  command: "{{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create"
   changed_when: false
   register: temp_token
   retries: 5
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
index a809f0ee1..c027a1f8d 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
@@ -62,7 +62,7 @@
 - name: kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
   command: >-
     {{ bin_dir }}/kubectl
-    --kubeconfig /etc/kubernetes/admin.conf
+    --kubeconfig {{ kube_config_dir }}/admin.conf
     -n kube-system
     scale deployment/coredns --replicas 0
   register: scale_down_coredns
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
index 2cafaeb7f..80746acda 100644
--- a/roles/remove-node/pre-remove/tasks/main.yml
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -9,7 +9,7 @@
 - name: remove-node | Drain node except daemonsets resource  # noqa 301
   command: >-
-    {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf drain
+    {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf drain
     --force
     --ignore-daemonsets
     --grace-period {{ drain_grace_period }}
diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml
index 805677f86..e56c1a1b2 100644
--- a/roles/upgrade/post-upgrade/tasks/main.yml
+++ b/roles/upgrade/post-upgrade/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: Uncordon node
-  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
+  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when:
     - needs_cordoning|default(false)
diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml
index 64e01d901..ef28189b3 100644
--- a/roles/upgrade/pre-upgrade/tasks/main.yml
+++ b/roles/upgrade/pre-upgrade/tasks/main.yml
@@ -84,7 +84,7 @@
       delay: "{{ drain_retry_delay_seconds }}"
   rescue:
     - name: Set node back to schedulable
-      command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf uncordon {{ inventory_hostname }}"
+      command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf uncordon {{ inventory_hostname }}"
      when: upgrade_node_uncordon_after_drain_failure
     - name: Fail after rescue
       fail:
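
For context, this change replaces every hardcoded `/etc/kubernetes/admin.conf` kubeconfig path with the `kube_config_dir` variable, making the location configurable per inventory. A minimal sketch of the corresponding group-variable default is shown below; the exact defaults file location is an assumption, not part of this diff:

```yaml
# Assumed defaults entry (e.g. in the kubespray-defaults role).
# With this default, the templated kubeconfig path still resolves to the
# previous hardcoded value, /etc/kubernetes/admin.conf, so stock
# deployments behave exactly as before.
kube_config_dir: /etc/kubernetes
```

Clusters that keep their kubeconfig elsewhere can then override it at run time, e.g. `ansible-playbook ... -e kube_config_dir=/opt/kubernetes`, and every task touched above picks up the new path.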