diff --git a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
index 538fc22fc..382fb6602 100644
--- a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
+++ b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
@@ -1,6 +1,6 @@
 ---
 - name: Kubernetes Apps | Register coredns deployment annotation `createdby`
-  shell: "{{ bin_dir }}/kubectl get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
+  command: "{{ kubectl }} get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
   register: createdby_annotation
   changed_when: false
   ignore_errors: true  # noqa ignore-errors
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
index 58688ae4a..269d2ec18 100644
--- a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
@@ -29,7 +29,7 @@
   tags: vsphere-csi-driver
 
 - name: vSphere CSI Driver | Generate a CSI secret manifest
-  command: "{{ bin_dir }}/kubectl create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml"
+  command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml"
   register: vsphere_csi_secret_manifest
   when: inventory_hostname == groups['kube_control_plane'][0]
   no_log: true
@@ -37,7 +37,7 @@
 
 - name: vSphere CSI Driver | Apply a CSI secret manifest
   command:
-    cmd: "{{ bin_dir }}/kubectl apply -f -"
+    cmd: "{{ kubectl }} apply -f -"
     stdin: "{{ vsphere_csi_secret_manifest.stdout }}"
   when: inventory_hostname == groups['kube_control_plane'][0]
   no_log: true
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
index 1c1534698..0f03dbb31 100644
--- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
@@ -10,8 +10,8 @@
     - upgrade
 
 - name: CephFS Provisioner | Remove legacy namespace
-  shell: |
-    {{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
+  command: >
+    {{ kubectl }} delete namespace {{ cephfs_provisioner_namespace }}
   ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -19,8 +19,8 @@
     - upgrade
 
 - name: CephFS Provisioner | Remove legacy storageclass
-  shell: |
-    {{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
+  command: >
+    {{ kubectl }} delete storageclass {{ cephfs_provisioner_storage_class }}
   ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml
index 06bc18849..e1c1241a6 100644
--- a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml
@@ -10,8 +10,8 @@
     - upgrade
 
 - name: RBD Provisioner | Remove legacy namespace
-  shell: |
-    {{ bin_dir }}/kubectl delete namespace {{ rbd_provisioner_namespace }}
+  command: >
+    {{ kubectl }} delete namespace {{ rbd_provisioner_namespace }}
   ignore_errors: true  # noqa ignore-errrors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -19,8 +19,8 @@
     - upgrade
 
 - name: RBD Provisioner | Remove legacy storageclass
-  shell: |
-    {{ bin_dir }}/kubectl delete storageclass {{ rbd_provisioner_storage_class }}
+  command: >
+    {{ kubectl }} delete storageclass {{ rbd_provisioner_storage_class }}
   ignore_errors: true  # noqa ignore-errrors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
index ce46aada5..33f2dbcf8 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
@@ -10,8 +10,8 @@
     - upgrade
 
 - name: Cert Manager | Remove legacy namespace
-  shell: |
-    {{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
+  command: >
+    {{ kubectl }} delete namespace {{ cert_manager_namespace }}
   ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
index 45a64d2b2..25f9a7132 100644
--- a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
@@ -12,7 +12,7 @@
   run_once: true
 
 - name: kube-router | Wait for kube-router pods to be ready
-  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601 ignore-errors
+  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601 ignore-errors
   register: pods_not_ready
   until: pods_not_ready.stdout.find("kube-router")==-1
   retries: 30
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
index ef6a7ac73..e99f2f840 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -190,7 +190,7 @@
 
 # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
 - name: kubeadm | Remove taint for master with node role
-  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} {{ item }}"
+  command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}"
   delegate_to: "{{ first_kube_control_plane }}"
   with_items:
     - "node-role.kubernetes.io/master:NoSchedule-"
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
index fe690fc3f..769ff3107 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
@@ -61,8 +61,7 @@
 # FIXME: https://github.com/kubernetes/kubeadm/issues/1318
 - name: kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
   command: >-
-    {{ bin_dir }}/kubectl
-    --kubeconfig {{ kube_config_dir }}/admin.conf
+    {{ kubectl }}
     -n kube-system
     scale deployment/coredns --replicas 0
   register: scale_down_coredns
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 8db58d34f..840a9cd68 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -115,9 +115,9 @@
 # incorrectly to first master, creating SPoF.
 - name: Update server field in kube-proxy kubeconfig
   shell: >-
-    set -o pipefail && {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get configmap kube-proxy -n kube-system -o yaml
+    set -o pipefail && {{ kubectl }} get configmap kube-proxy -n kube-system -o yaml
     | sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g'
-    | {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf replace -f -
+    | {{ kubectl }} replace -f -
   args:
     executable: /bin/bash
   run_once: true
@@ -139,7 +139,7 @@
     mode: "0644"
 
 - name: Restart all kube-proxy pods to ensure that they load the new configmap
-  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
+  command: "{{ kubectl }} delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
   run_once: true
   delegate_to: "{{ groups['kube_control_plane']|first }}"
   delegate_facts: false
diff --git a/roles/kubernetes/node-label/tasks/main.yml b/roles/kubernetes/node-label/tasks/main.yml
index b7f8138a6..f91e7f459 100644
--- a/roles/kubernetes/node-label/tasks/main.yml
+++ b/roles/kubernetes/node-label/tasks/main.yml
@@ -42,7 +42,7 @@
 
 - name: Set label to node
   command: >-
-    {{ bin_dir }}/kubectl label node {{ kube_override_hostname | default(inventory_hostname) }} {{ item }} --overwrite=true
+    {{ kubectl }} label node {{ kube_override_hostname | default(inventory_hostname) }} {{ item }} --overwrite=true
   loop: "{{ role_node_labels + inventory_node_labels }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   changed_when: false
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 106deb21f..556766a16 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -138,6 +138,10 @@ kube_config_dir: /etc/kubernetes
 kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
 
+# Kubectl command
+# This is for consistency when using the kubectl command in roles, and ensures it always targets the admin kubeconfig
+kubectl: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf"
+
 # This is where all the cert scripts and certs will be located
 kube_cert_dir: "{{ kube_config_dir }}/ssl"
 
diff --git a/roles/network_plugin/calico/tasks/pre.yml b/roles/network_plugin/calico/tasks/pre.yml
index 74d3f7915..162aca150 100644
--- a/roles/network_plugin/calico/tasks/pre.yml
+++ b/roles/network_plugin/calico/tasks/pre.yml
@@ -19,7 +19,7 @@
 
 - name: Calico | Get kubelet hostname
   shell: >-
-    set -o pipefail && {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
+    set -o pipefail && {{ kubectl }} get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
     | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1
   args:
     executable: /bin/bash
diff --git a/roles/network_plugin/calico/tasks/typha_certs.yml b/roles/network_plugin/calico/tasks/typha_certs.yml
index c2647a1cb..9f94067bc 100644
--- a/roles/network_plugin/calico/tasks/typha_certs.yml
+++ b/roles/network_plugin/calico/tasks/typha_certs.yml
@@ -1,6 +1,6 @@
 ---
 - name: Calico | Check if typha-server exists
-  command: "{{ bin_dir }}/kubectl -n kube-system get secret typha-server"
+  command: "{{ kubectl }} -n kube-system get secret typha-server"
   register: typha_server_secret
   changed_when: false
   failed_when: false
@@ -35,7 +35,7 @@
 
 - name: Calico | Create typha tls secrets
   command: >-
-    {{ bin_dir }}/kubectl -n kube-system
+    {{ kubectl }} -n kube-system
     create secret tls {{ item.name }}
     --cert {{ item.cert }}
     --key {{ item.key }}
diff --git a/roles/network_plugin/cilium/tasks/apply.yml b/roles/network_plugin/cilium/tasks/apply.yml
index 2a967adbc..89ccb1e56 100644
--- a/roles/network_plugin/cilium/tasks/apply.yml
+++ b/roles/network_plugin/cilium/tasks/apply.yml
@@ -11,7 +11,7 @@
   when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
 
 - name: Cilium | Wait for pods to run
-  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601
+  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601
   register: pods_not_ready
   until: pods_not_ready.stdout.find("cilium")==-1
   retries: 30
diff --git a/roles/network_plugin/kube-ovn/tasks/main.yml b/roles/network_plugin/kube-ovn/tasks/main.yml
index 2efafa4cd..3278642b1 100644
--- a/roles/network_plugin/kube-ovn/tasks/main.yml
+++ b/roles/network_plugin/kube-ovn/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: Kube-OVN | Label ovn-db node
   command: >-
-    {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube_control_plane'] | first }} kube-ovn/role=master
+    {{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} kube-ovn/role=master
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
 
diff --git a/roles/network_plugin/kube-router/tasks/annotate.yml b/roles/network_plugin/kube-router/tasks/annotate.yml
index 30190124d..e91249f7d 100644
--- a/roles/network_plugin/kube-router/tasks/annotate.yml
+++ b/roles/network_plugin/kube-router/tasks/annotate.yml
@@ -1,20 +1,20 @@
 ---
 - name: kube-router | Add annotations on kube_control_plane
-  command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+  command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
     - "{{ kube_router_annotations_master }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane']
 
 - name: kube-router | Add annotations on kube_node
-  command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+  command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
     - "{{ kube_router_annotations_node }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: kube_router_annotations_node is defined and inventory_hostname in groups['kube_node']
 
 - name: kube-router | Add common annotations on all servers
-  command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+  command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
     - "{{ kube_router_annotations_all }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
diff --git a/roles/network_plugin/macvlan/tasks/main.yml b/roles/network_plugin/macvlan/tasks/main.yml
index a014fd436..0c381c79e 100644
--- a/roles/network_plugin/macvlan/tasks/main.yml
+++ b/roles/network_plugin/macvlan/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: Macvlan | Retrieve Pod Cidr
-  command: "{{ bin_dir }}/kubectl get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'"
+  command: "{{ kubectl }} get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'"
   changed_when: false
   register: node_pod_cidr_cmd
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
diff --git a/roles/network_plugin/ovn4nfv/tasks/main.yml b/roles/network_plugin/ovn4nfv/tasks/main.yml
new file mode 100644
index 000000000..51f9eaa29
--- /dev/null
+++ b/roles/network_plugin/ovn4nfv/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+- name: ovn4nfv | Label control-plane node
+  command: >-
+    {{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane
+  when:
+    - inventory_hostname == groups['kube_control_plane'][0]
+
+- name: ovn4nfv | Create ovn4nfv-k8s manifests
+  template:
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
+  with_items:
+    - {name: ovn-daemonset, file: ovn-daemonset.yml}
+    - {name: ovn4nfv-k8s-plugin, file: ovn4nfv-k8s-plugin.yml}
+  register: ovn4nfv_node_manifests
diff --git a/roles/recover_control_plane/control-plane/tasks/main.yml b/roles/recover_control_plane/control-plane/tasks/main.yml
index 450e6f36d..4a4e3eb7e 100644
--- a/roles/recover_control_plane/control-plane/tasks/main.yml
+++ b/roles/recover_control_plane/control-plane/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: Wait for apiserver
-  command: "{{ bin_dir }}/kubectl get nodes"
+  command: "{{ kubectl }} get nodes"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   register: apiserver_is_ready
@@ -11,7 +11,7 @@
   when: groups['broken_kube_control_plane']
 
 - name: Delete broken kube_control_plane nodes from cluster
-  command: "{{ bin_dir }}/kubectl delete node {{ item }}"
+  command: "{{ kubectl }} delete node {{ item }}"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   with_items: "{{ groups['broken_kube_control_plane'] }}"
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index 6ca8c2a68..31dd462f4 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: Delete node
-  command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}"
+  command: "{{ kubectl }} delete node {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube_control_plane']|first }}"
   when: inventory_hostname in groups['k8s_cluster']
   retries: 10
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
index d92004809..32d4f9831 100644
--- a/roles/remove-node/pre-remove/tasks/main.yml
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: remove-node | List nodes
   command: >-
-    {{ bin_dir }}/kubectl get nodes -o go-template={% raw %}'{{ range .items }}{{ .metadata.name }}{{ "\n" }}{{ end }}'{% endraw %}
+    {{ kubectl }} get nodes -o go-template={% raw %}'{{ range .items }}{{ .metadata.name }}{{ "\n" }}{{ end }}'{% endraw %}
   register: nodes
   delegate_to: "{{ groups['kube_control_plane']|first }}"
   changed_when: false
@@ -9,7 +9,7 @@
 
 - name: remove-node | Drain node except daemonsets resource  # noqa 301
   command: >-
-    {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf drain
+    {{ kubectl }} drain
     --force
     --ignore-daemonsets
     --grace-period {{ drain_grace_period }}
diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml b/roles/remove-node/remove-etcd-node/tasks/main.yml
index c69dd9069..3d01f332b 100644
--- a/roles/remove-node/remove-etcd-node/tasks/main.yml
+++ b/roles/remove-node/remove-etcd-node/tasks/main.yml
@@ -1,8 +1,8 @@
 ---
 - name: Lookup node IP in kubernetes
-  shell: >-
-    {{ bin_dir }}/kubectl get nodes {{ node }}
+  command: >
+    {{ kubectl }} get nodes {{ node }}
     -o jsonpath='{range.status.addresses[?(@.type=="InternalIP")]}{.address}{"\n"}{end}'
   register: remove_node_ip
   when:
     - inventory_hostname in groups['etcd']
diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml
index f19ecafb1..f460d0863 100644
--- a/roles/upgrade/post-upgrade/tasks/main.yml
+++ b/roles/upgrade/post-upgrade/tasks/main.yml
@@ -4,7 +4,7 @@
     - needs_cordoning|default(false)
     - kube_network_plugin == 'cilium'
   command: >
-    {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf
+    {{ kubectl }}
     wait pod -n kube-system -l k8s-app=cilium
     --field-selector 'spec.nodeName=={{ kube_override_hostname|default(inventory_hostname) }}'
     --for=condition=Ready
@@ -12,7 +12,7 @@
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: Uncordon node
-  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
+  command: "{{ kubectl }} uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when:
     - needs_cordoning|default(false)
diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml
index 36d06224e..9aad57e0e 100644
--- a/roles/upgrade/pre-upgrade/tasks/main.yml
+++ b/roles/upgrade/pre-upgrade/tasks/main.yml
@@ -17,9 +17,9 @@
 # Node Ready: type = ready, status = True
 # Node NotReady: type = ready, status = Unknown
 - name: See if node is in ready state
-  shell: >-
-    {{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }}
+  command: >
+    {{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }}
     -o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }'
   register: kubectl_node_ready
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   failed_when: false
@@ -28,9 +28,9 @@
 # SchedulingDisabled: unschedulable = true
 # else unschedulable key doesn't exist
 - name: See if node is schedulable
-  shell: >-
-    {{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }}
+  command: >
+    {{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }}
     -o jsonpath='{ .spec.unschedulable }'
   register: kubectl_node_schedulable
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   failed_when: false
@@ -48,11 +48,11 @@
 - name: Node draining
   block:
     - name: Cordon node
-      command: "{{ bin_dir }}/kubectl cordon {{ kube_override_hostname|default(inventory_hostname) }}"
+      command: "{{ kubectl }} cordon {{ kube_override_hostname|default(inventory_hostname) }}"
       delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
     - name: Check kubectl version
-      command: "{{ bin_dir }}/kubectl version --client --short"
+      command: "{{ kubectl }} version --client --short"
       register: kubectl_version
       delegate_to: "{{ groups['kube_control_plane'][0] }}"
       run_once: yes
@@ -70,7 +70,7 @@
 
     - name: Drain node
       command: >-
-        {{ bin_dir }}/kubectl drain
+        {{ kubectl }} drain
         --force
         --ignore-daemonsets
         --grace-period {{ hostvars['localhost']['drain_grace_period_after_failure'] | default(drain_grace_period) }}
@@ -98,7 +98,7 @@
 
         - name: Drain node - fallback with disabled eviction
           command: >-
-            {{ bin_dir }}/kubectl drain
+            {{ kubectl }} drain
             --force
             --ignore-daemonsets
             --grace-period {{ drain_fallback_grace_period }}
@@ -117,7 +117,7 @@
 
   rescue:
     - name: Set node back to schedulable
-      command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf uncordon {{ inventory_hostname }}"
+      command: "{{ kubectl }} uncordon {{ inventory_hostname }}"
       when: upgrade_node_uncordon_after_drain_failure
     - name: Fail after rescue
       fail:
diff --git a/roles/win_nodes/kubernetes_patch/tasks/main.yml b/roles/win_nodes/kubernetes_patch/tasks/main.yml
index 77da68352..a6c70edbd 100644
--- a/roles/win_nodes/kubernetes_patch/tasks/main.yml
+++ b/roles/win_nodes/kubernetes_patch/tasks/main.yml
@@ -12,9 +12,9 @@
 # Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch"
 - name: Check current nodeselector for kube-proxy daemonset
   command: >-
-    {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf
+    {{ kubectl }}
     get ds kube-proxy --namespace=kube-system
     -o jsonpath='{.spec.template.spec.nodeSelector.{{ kube_proxy_nodeselector | regex_replace('\.', '\\.') }}}'
   register: current_kube_proxy_state
   retries: 60
   delay: 5
@@ -22,9 +22,9 @@
   changed_when: false
 
 - name: Apply nodeselector patch for kube-proxy daemonset
-  shell: >-
-    {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf
+  command: >
+    {{ kubectl }}
     patch ds kube-proxy --namespace=kube-system --type=strategic -p
     '{"spec":{"template":{"spec":{"nodeSelector":{"{{ kube_proxy_nodeselector }}":"linux"} }}}}'
   register: patch_kube_proxy_state