Ensure we always fixup kube-proxy kubeconfig (#5524)
When running with serial != 100%, as upgrade_cluster.yml does, we need to apply this fixup on every play batch, not just once.
The problem was introduced in 05dc2b3a09.
Signed-off-by: Etienne Champetier <champetier.etienne@gmail.com>
commit 5e9479cded
parent 06ffe44f1f
1 changed file with 3 additions and 3 deletions
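
Why the one-line change works: with `run_once: true`, Ansible bypasses the host loop and executes the task on the first host of the current batch, evaluating any `when` condition with that host's variables only. Under serial batching, the batch's first host is usually not groups['kube-master']|first, so the old guard skipped the fixup for the whole batch. Delegating to the first master instead keeps the task running once per batch while always executing it there. A minimal sketch of the before/after pattern (task name and command are placeholders, not from this commit):

    # Before: with serial batching, the guard is evaluated on the batch's
    # first host and skips the task whenever that host is not the first master.
    - name: Fixup kube-proxy kubeconfig   # placeholder name
      shell: "..."                        # elided; see the hunks below
      run_once: true
      when:
        - inventory_hostname == groups['kube-master']|first

    # After: still runs once per batch, but always executed on the first master.
    - name: Fixup kube-proxy kubeconfig   # placeholder name
      shell: "..."                        # elided
      run_once: true
      delegate_to: "{{ groups['kube-master']|first }}"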
@@ -111,8 +111,8 @@
      | sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g'
      | {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf replace -f -
   run_once: true
+  delegate_to: "{{ groups['kube-master']|first }}"
   when:
-    - inventory_hostname == groups['kube-master']|first
     - kubeadm_config_api_fqdn is not defined
     - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "")
     - not kube_proxy_remove
@@ -129,8 +129,8 @@
 - name: Restart all kube-proxy pods to ensure that they load the new configmap
   shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
   run_once: true
+  delegate_to: "{{ groups['kube-master']|first }}"
   when:
-    - inventory_hostname == groups['kube-master']|first
     - kubeadm_config_api_fqdn is not defined
     - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "")
     - not kube_proxy_remove
@@ -153,8 +153,8 @@
 - name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services
   shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
   run_once: true
+  delegate_to: "{{ groups['kube-master']|first }}"
   when:
-    - inventory_hostname == groups['kube-master']|first
     - kube_proxy_remove
     - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "")
 # When scaling/adding nodes in the existing k8s cluster, kube-proxy wouldn't be created, as `kubeadm init` wouldn't run.
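
For context on the serial != 100% case: upgrade_cluster.yml upgrades hosts in batches rather than all at once, so a given batch may not contain groups['kube-master']|first at all. A minimal sketch of such a batched play (the group name and the 20% default are assumptions for illustration, not taken from this commit):

    # Batched play: run_once picks the first host of each batch,
    # which is usually not groups['kube-master']|first.
    - hosts: kube-node
      serial: "{{ serial | default('20%') }}"
      roles:
        - { role: kubernetes/kubeadm }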