All CNIs: support ANY toleration. (#3391)

Before, Nodes tainted with the NoExecute policy did not run the calico/weave Pod.
The network Pod should run on all Nodes regardless of what happens on a specific Node.

Also always set the Pods to be critical.
Also remove deprecated scheduler.alpha.kubernetes.io/tolerations annotations.
This commit is contained in:
Cédric de Saint Martin 2018-09-27 14:28:54 +02:00 committed by k8s-ci-robot
parent 232020ef96
commit 53d87e53c5
11 changed files with 58 additions and 31 deletions

View file

@ -18,6 +18,7 @@ spec:
labels: labels:
k8s-app: calico-node k8s-app: calico-node
annotations: annotations:
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/critical-pod: ''
kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}" kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
spec: spec:
@ -27,8 +28,10 @@ spec:
hostNetwork: true hostNetwork: true
serviceAccountName: calico-node serviceAccountName: calico-node
tolerations: tolerations:
- effect: NoSchedule - operator: Exists
operator: Exists # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0 terminationGracePeriodSeconds: 0
@ -189,4 +192,4 @@ spec:
updateStrategy: updateStrategy:
rollingUpdate: rollingUpdate:
maxUnavailable: {{ serial | default('20%') }} maxUnavailable: {{ serial | default('20%') }}
type: RollingUpdate type: RollingUpdate

View file

@ -13,8 +13,8 @@ spec:
template: template:
metadata: metadata:
annotations: annotations:
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
labels: labels:
k8s-app: canal-node k8s-app: canal-node
spec: spec:
@ -24,8 +24,10 @@ spec:
hostNetwork: true hostNetwork: true
serviceAccountName: canal serviceAccountName: canal
tolerations: tolerations:
- effect: NoSchedule - operator: Exists
operator: Exists # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
volumes: volumes:
# Used by calico/node. # Used by calico/node.
- name: lib-modules - name: lib-modules

View file

@ -27,8 +27,6 @@ spec:
# gets priority scheduling. # gets priority scheduling.
# https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: >-
[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]
{% if cilium_enable_prometheus %} {% if cilium_enable_prometheus %}
prometheus.io/scrape: "true" prometheus.io/scrape: "true"
prometheus.io/port: "9090" prometheus.io/port: "9090"
@ -225,11 +223,7 @@ spec:
restartPolicy: Always restartPolicy: Always
tolerations: tolerations:
- effect: NoSchedule - operator: Exists
key: node-role.kubernetes.io/master # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
# Mark cilium's pod as critical for rescheduling
- key: CriticalAddonsOnly - key: CriticalAddonsOnly
operator: "Exists" operator: "Exists"

View file

@ -16,6 +16,7 @@ spec:
labels: labels:
k8s-app: contiv-api-proxy k8s-app: contiv-api-proxy
annotations: annotations:
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/critical-pod: ''
spec: spec:
{% if kube_version|version_compare('v1.11.1', '>=') %} {% if kube_version|version_compare('v1.11.1', '>=') %}
@ -28,8 +29,10 @@ spec:
nodeSelector: nodeSelector:
node-role.kubernetes.io/master: "true" node-role.kubernetes.io/master: "true"
tolerations: tolerations:
- key: node-role.kubernetes.io/master - operator: Exists
effect: NoSchedule # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
serviceAccountName: contiv-netmaster serviceAccountName: contiv-netmaster
containers: containers:
- name: contiv-api-proxy - name: contiv-api-proxy

View file

@ -14,6 +14,9 @@ spec:
metadata: metadata:
labels: labels:
k8s-app: contiv-cleanup k8s-app: contiv-cleanup
annotations:
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
scheduler.alpha.kubernetes.io/critical-pod: ''
spec: spec:
{% if kube_version|version_compare('v1.11.1', '>=') %} {% if kube_version|version_compare('v1.11.1', '>=') %}
priorityClassName: system-node-critical priorityClassName: system-node-critical
@ -21,8 +24,10 @@ spec:
hostNetwork: true hostNetwork: true
hostPID: true hostPID: true
tolerations: tolerations:
- key: node-role.kubernetes.io/master - operator: Exists
effect: NoSchedule # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
serviceAccountName: contiv-netplugin serviceAccountName: contiv-netplugin
containers: containers:
- name: contiv-ovs-cleanup - name: contiv-ovs-cleanup

View file

@ -25,8 +25,10 @@ spec:
nodeSelector: nodeSelector:
node-role.kubernetes.io/master: "true" node-role.kubernetes.io/master: "true"
tolerations: tolerations:
- key: node-role.kubernetes.io/master - operator: Exists
effect: NoSchedule # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
initContainers: initContainers:
- name: contiv-etcd-init - name: contiv-etcd-init
image: {{ contiv_etcd_init_image_repo }}:{{ contiv_etcd_init_image_tag }} image: {{ contiv_etcd_init_image_repo }}:{{ contiv_etcd_init_image_tag }}

View file

@ -16,6 +16,7 @@ spec:
labels: labels:
k8s-app: contiv-netmaster k8s-app: contiv-netmaster
annotations: annotations:
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/critical-pod: ''
spec: spec:
{% if kube_version|version_compare('v1.11.1', '>=') %} {% if kube_version|version_compare('v1.11.1', '>=') %}
@ -28,8 +29,10 @@ spec:
nodeSelector: nodeSelector:
node-role.kubernetes.io/master: "true" node-role.kubernetes.io/master: "true"
tolerations: tolerations:
- key: node-role.kubernetes.io/master - operator: Exists
effect: NoSchedule # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
serviceAccountName: contiv-netmaster serviceAccountName: contiv-netmaster
containers: containers:
- name: contiv-netmaster - name: contiv-netmaster

View file

@ -20,6 +20,7 @@ spec:
labels: labels:
k8s-app: contiv-netplugin k8s-app: contiv-netplugin
annotations: annotations:
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/critical-pod: ''
spec: spec:
{% if kube_version|version_compare('v1.11.1', '>=') %} {% if kube_version|version_compare('v1.11.1', '>=') %}
@ -28,8 +29,10 @@ spec:
hostNetwork: true hostNetwork: true
hostPID: true hostPID: true
tolerations: tolerations:
- key: node-role.kubernetes.io/master - operator: Exists
effect: NoSchedule # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
serviceAccountName: contiv-netplugin serviceAccountName: contiv-netplugin
initContainers: initContainers:
- name: contiv-netplugin-init - name: contiv-netplugin-init

View file

@ -17,6 +17,7 @@ spec:
labels: labels:
k8s-app: contiv-ovs k8s-app: contiv-ovs
annotations: annotations:
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/critical-pod: ''
spec: spec:
{% if kube_version|version_compare('v1.11.1', '>=') %} {% if kube_version|version_compare('v1.11.1', '>=') %}
@ -25,8 +26,10 @@ spec:
hostNetwork: true hostNetwork: true
hostPID: true hostPID: true
tolerations: tolerations:
- key: node-role.kubernetes.io/master - operator: Exists
effect: NoSchedule # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
containers: containers:
# Runs ovs containers on each Kubernetes node. # Runs ovs containers on each Kubernetes node.
- name: contiv-ovsdb-server - name: contiv-ovsdb-server

View file

@ -51,6 +51,9 @@ spec:
labels: labels:
tier: node tier: node
k8s-app: flannel k8s-app: flannel
annotations:
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
scheduler.alpha.kubernetes.io/critical-pod: ''
spec: spec:
{% if kube_version|version_compare('v1.11.1', '>=') %} {% if kube_version|version_compare('v1.11.1', '>=') %}
priorityClassName: system-node-critical priorityClassName: system-node-critical
@ -108,9 +111,10 @@ spec:
mountPath: /host/opt/cni/bin/ mountPath: /host/opt/cni/bin/
hostNetwork: true hostNetwork: true
tolerations: tolerations:
- key: node-role.kubernetes.io/master - operator: Exists
operator: Exists # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
effect: NoSchedule - key: CriticalAddonsOnly
operator: "Exists"
volumes: volumes:
- name: run - name: run
hostPath: hostPath:

View file

@ -114,6 +114,9 @@ items:
metadata: metadata:
labels: labels:
name: weave-net name: weave-net
annotations:
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
scheduler.alpha.kubernetes.io/critical-pod: ''
spec: spec:
{% if kube_version|version_compare('v1.11.1', '>=') %} {% if kube_version|version_compare('v1.11.1', '>=') %}
priorityClassName: system-node-critical priorityClassName: system-node-critical
@ -224,8 +227,10 @@ items:
seLinuxOptions: {} seLinuxOptions: {}
serviceAccountName: weave-net serviceAccountName: weave-net
tolerations: tolerations:
- effect: NoSchedule - operator: Exists
operator: Exists # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
volumes: volumes:
- name: weavedb - name: weavedb
hostPath: hostPath: