Fix for unknown 'kubernetes.io' or 'k8s.io' labels specified with --node-labels (#4320)
* Fix the file path for all.yml and k8s-cluster.yml
* Fix the --node-labels namespace error "unknown labels specified"
* Update kubelet node-labels in templates and configs
parent 6caa639243
commit 586ad89d50
18 changed files with 25 additions and 25 deletions
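Background note (not part of the commit itself): recent kubelets reject self-applied --node-labels in the kubernetes.io / k8s.io namespaces unless the label uses an allowed prefix such as node.kubernetes.io, which is why the node-role.kubernetes.io role labels rendered by kubelet.env fail with the error quoted in the title. A minimal sketch of the rendered flag for a host in both kube-master and kube-node, before and after this change (the rest of the kubelet argument assembly is omitted):

# Before: rejected by the kubelet's --node-labels validation
--node-labels=node-role.kubernetes.io/master='',node-role.kubernetes.io/node=''
# After: node.kubernetes.io/* falls within the kubelet's allowed prefixes
--node-labels=node.kubernetes.io/master='',node.kubernetes.io/node=''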
@@ -60,9 +60,9 @@ cephfs_provisioner_enabled: false
 ingress_nginx_enabled: false
 # ingress_nginx_host_network: false
 # ingress_nginx_nodeselector:
-#   node-role.kubernetes.io/node: ""
+#   node.kubernetes.io/node: ""
 # ingress_nginx_tolerations:
-#   - key: "node-role.kubernetes.io/master"
+#   - key: "node.kubernetes.io/master"
 #     operator: "Equal"
 #     value: ""
 #     effect: "NoSchedule"
@@ -30,7 +30,7 @@ spec:
   beta.kubernetes.io/os: linux
 serviceAccountName: coredns
 tolerations:
-- key: node-role.kubernetes.io/master
+- key: node.kubernetes.io/master
   effect: NoSchedule
 - key: "CriticalAddonsOnly"
   operator: "Exists"
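The retargeted toleration above only has an effect on nodes that actually carry a matching taint; a minimal sketch of that taint as it would be set on a master by the nodeRegistration sections further down in this diff (illustrative, not part of this hunk):

taints:
- key: node.kubernetes.io/master
  effect: NoSchedule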
@@ -46,7 +46,7 @@ spec:
 - weight: 100
   preference:
     matchExpressions:
-    - key: node-role.kubernetes.io/master
+    - key: node.kubernetes.io/master
       operator: In
       values:
       - ""
@@ -195,7 +195,7 @@ spec:
 serviceAccountName: kubernetes-dashboard
 {% if dashboard_master_toleration %}
 tolerations:
-- key: node-role.kubernetes.io/master
+- key: node.kubernetes.io/master
   effect: NoSchedule
 {% endif %}
@@ -45,7 +45,7 @@ spec:
 tolerations:
 - effect: NoSchedule
   operator: Equal
-  key: node-role.kubernetes.io/master
+  key: node.kubernetes.io/master
 affinity:
   podAntiAffinity:
     requiredDuringSchedulingIgnoredDuringExecution:
@@ -58,7 +58,7 @@ spec:
 - weight: 100
   preference:
     matchExpressions:
-    - key: node-role.kubernetes.io/master
+    - key: node.kubernetes.io/master
       operator: In
       values:
       - ""
@@ -35,12 +35,12 @@ spec:
 serviceAccountName: cloud-controller-manager
 hostNetwork: true
 nodeSelector:
-  node-role.kubernetes.io/master: ""
+  node.kubernetes.io/master: ""
 tolerations:
 - key: node.cloudprovider.kubernetes.io/uninitialized
   value: "true"
   effect: NoSchedule
-- key: node-role.kubernetes.io/master
+- key: node.kubernetes.io/master
   operator: Exists
   effect: NoSchedule
 volumes:
@@ -2,7 +2,7 @@
 ingress_nginx_namespace: "ingress-nginx"
 ingress_nginx_host_network: false
 ingress_nginx_nodeselector:
-  node-role.kubernetes.io/node: ""
+  node.kubernetes.io/node: ""
 ingress_nginx_tolerations: []
 ingress_nginx_insecure_port: 80
 ingress_nginx_secure_port: 443
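As a usage sketch (an assumed group_vars override, not part of the commit), the defaults above can be overridden to pin the ingress controller to masters with the renamed keys, mirroring the commented sample near the top of this diff:

ingress_nginx_nodeselector:
  node.kubernetes.io/master: ""
ingress_nginx_tolerations:
  - key: "node.kubernetes.io/master"
    operator: "Equal"
    value: ""
    effect: "NoSchedule"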
@@ -116,7 +116,7 @@ spec:
   name: metrics-server-config
 {% if not masters_are_not_tainted %}
 tolerations:
-- key: node-role.kubernetes.io/master
+- key: node.kubernetes.io/master
   effect: NoSchedule
 - key: "CriticalAddonsOnly"
   operator: "Exists"
@@ -127,7 +127,7 @@ spec:
 - weight: 100
   preference:
     matchExpressions:
-    - key: node-role.kubernetes.io/master
+    - key: node.kubernetes.io/master
       operator: In
       values:
       - ""
@@ -29,7 +29,7 @@ spec:
 tolerations:
 - key: CriticalAddonsOnly
   operator: Exists
-- key: node-role.kubernetes.io/master
+- key: node.kubernetes.io/master
   effect: NoSchedule
 {% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-cluster-critical
@@ -176,7 +176,7 @@
     - old_apiserver_cert.stat.exists

 - name: kubeadm | Remove taint for master with node role
-  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} node-role.kubernetes.io/master:NoSchedule-"
+  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} node.kubernetes.io/master:NoSchedule-"
   delegate_to: "{{groups['kube-master']|first}}"
   when: inventory_hostname in groups['kube-node']
   failed_when: false
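For a manual spot-check of the same operation (illustrative only; the admin.conf path and node name node1 are assumptions), the task above corresponds to:

kubectl --kubeconfig /etc/kubernetes/admin.conf taint node node1 node.kubernetes.io/master:NoSchedule-
kubectl --kubeconfig /etc/kubernetes/admin.conf describe node node1 | grep -i taints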
@@ -224,7 +224,7 @@ nodeRegistration:
 {% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
   taints:
   - effect: NoSchedule
-    key: node-role.kubernetes.io/master
+    key: node.kubernetes.io/master
 {% endif %}
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
@@ -10,7 +10,7 @@ nodeRegistration:
 {% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
   taints:
   - effect: NoSchedule
-    key: node-role.kubernetes.io/master
+    key: node.kubernetes.io/master
 {% endif %}
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
@@ -10,7 +10,7 @@ nodeRegistration:
 {% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
   taints:
   - effect: NoSchedule
-    key: node-role.kubernetes.io/master
+    key: node.kubernetes.io/master
 {% endif %}
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
@@ -85,12 +85,12 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {# Kubelet node labels #}
 {% set role_node_labels = [] %}
 {% if inventory_hostname in groups['kube-master'] %}
-{% set dummy = role_node_labels.append("node-role.kubernetes.io/master=''") %}
+{% set dummy = role_node_labels.append("node.kubernetes.io/master=''") %}
 {% if not standalone_kubelet|bool %}
-{% set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
+{% set dummy = role_node_labels.append("node.kubernetes.io/node=''") %}
 {% endif %}
 {% else %}
-{% set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
+{% set dummy = role_node_labels.append("node.kubernetes.io/node=''") %}
 {% endif %}
 {% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %}
 {% if inventory_hostname in nvidia_gpu_nodes %}
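To trace the branches above, a sketch of what role_node_labels evaluates to per host type after this change (how the list is later joined into the actual --node-labels flag happens further down in kubelet.env and is not shown here):

# host in kube-master and kube-node:       ["node.kubernetes.io/master=''", "node.kubernetes.io/node=''"]
# host in kube-master, standalone_kubelet: ["node.kubernetes.io/master=''"]
# host in kube-node only:                  ["node.kubernetes.io/node=''"]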
@@ -24,7 +24,7 @@ spec:
 hostNetwork: true
 hostPID: true
 nodeSelector:
-  node-role.kubernetes.io/master: ""
+  node.kubernetes.io/master: ""
 tolerations:
 - operator: Exists
 # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
@@ -21,7 +21,7 @@ spec:
 hostNetwork: true
 hostPID: true
 nodeSelector:
-  node-role.kubernetes.io/node: ""
+  node.kubernetes.io/node: ""
 containers:
 - name: contiv-etcd-proxy
   image: {{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}
@@ -21,7 +21,7 @@ spec:
 hostNetwork: true
 hostPID: true
 nodeSelector:
-  node-role.kubernetes.io/master: ""
+  node.kubernetes.io/master: ""
 tolerations:
 - operator: Exists
 # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
@@ -24,7 +24,7 @@ spec:
 hostNetwork: true
 hostPID: true
 nodeSelector:
-  node-role.kubernetes.io/master: ""
+  node.kubernetes.io/master: ""
 tolerations:
 - operator: Exists
 # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
@@ -18,7 +18,7 @@ spec:
 nodeSelector:
   beta.kubernetes.io/arch: amd64
 tolerations:
-- key: node-role.kubernetes.io/master
+- key: node.kubernetes.io/master
   operator: Exists
   effect: NoSchedule
 serviceAccountName: multus