Merge pull request #1567 from mattymo/tolerations

Enable scheduling of critical pods and network plugins on master
Brad Beam 2017-08-24 08:40:41 -05:00 committed by GitHub
commit af211b3d71
11 changed files with 48 additions and 18 deletions
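Every manifest in this change picks up the same blanket toleration, which is what allows these critical and network-plugin pods to be scheduled onto master nodes that carry a NoSchedule taint. A minimal sketch of the pattern, assuming a standalone Pod and a master tainted in the usual kubeadm style (the pod name, image, and taint key below are illustrative, not taken from this diff):

# A toleration with operator "Exists" and no key matches every taint that has
# the given effect, so this pod ignores all NoSchedule taints, including a
# master taint such as node-role.kubernetes.io/master:NoSchedule.
apiVersion: v1
kind: Pod
metadata:
  name: toleration-demo        # hypothetical name, for illustration only
spec:
  tolerations:
  - effect: NoSchedule
    operator: Exists
  containers:
  - name: demo
    image: busybox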


@@ -31,20 +31,23 @@ spec:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      containers:
      - name: autoscaler
        image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1
        resources:
          requests:
            cpu: "20m"
            memory: "10Mi"
        command:
        - /cluster-proportional-autoscaler
        - --namespace=kube-system
        - --configmap=dnsmasq-autoscaler
        - --target=Deployment/dnsmasq
        # When the cluster is using large nodes (with more cores), "coresPerReplica" should dominate.
        # If using small nodes, "nodesPerReplica" should dominate.
        - --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}}
        - --logtostderr=true
        - --v={{ kube_log_level }}
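The dnsmasq autoscaler above still carries the legacy scheduler.alpha.kubernetes.io/tolerations annotation next to the new first-class field. The annotation was the alpha mechanism used before Kubernetes 1.6; the spec.tolerations field added in this PR is the supported replacement and expresses the same thing. A side-by-side sketch (not part of the diff):

# Legacy annotation form (alpha, pre-1.6):
metadata:
  annotations:
    scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'

# Equivalent first-class field (1.6 and later):
spec:
  tolerations:
  - key: CriticalAddonsOnly
    operator: Exists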


@@ -21,6 +21,9 @@ spec:
        kubernetes.io/cluster-service: "true"
        kubespray/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}"
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      containers:
      - name: dnsmasq
        image: "{{ dnsmasq_image_repo }}:{{ dnsmasq_image_tag }}"


@@ -29,11 +29,15 @@ spec:
        k8s-app: kubedns-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      - key: CriticalAddonsOnly
        operator: Exists
      containers:
      - name: autoscaler
        image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}"
        resources:
          requests:
            cpu: "20m"


@@ -30,6 +30,8 @@ spec:
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      - effect: NoSchedule
        operator: Exists
      volumes:
      - name: kube-dns-config
        configMap:


@@ -12,6 +12,9 @@ spec:
      labels:
        app: netchecker-agent
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      containers:
      - name: netchecker-agent
        image: "{{ agent_img }}"


@@ -16,6 +16,9 @@ spec:
{% if kube_version | version_compare('v1.6', '>=') %}
      dnsPolicy: ClusterFirstWithHostNet
{% endif %}
      tolerations:
      - effect: NoSchedule
        operator: Exists
      containers:
      - name: netchecker-agent
        image: "{{ agent_img }}"


@@ -17,6 +17,9 @@ spec:
        kubernetes.io/cluster-service: "true"
        version: "v{{ fluentd_version }}"
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      containers:
      - name: fluentd-es
        image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}"


@@ -21,6 +21,9 @@ spec:
        k8s-app: calico-policy
    spec:
      hostNetwork: true
      tolerations:
      - effect: NoSchedule
        operator: Exists
      containers:
      - name: calico-policy-controller
        image: {{ calico_policy_image_repo }}:{{ calico_policy_image_tag }}


@@ -18,6 +18,9 @@ spec:
        k8s-app: canal-node
    spec:
      hostNetwork: true
      tolerations:
      - effect: NoSchedule
        operator: Exists
      volumes:
      # Used by calico/node.
      - name: lib-modules


@@ -8,6 +8,9 @@ metadata:
app: "flannel"
version: "v0.1"
spec:
tolerations:
- effect: NoSchedule
operator: Exists
volumes:
- name: "subnetenv"
hostPath:


@@ -153,4 +153,4 @@ items:
          path: /var/lib/dbus
        - name: lib-modules
          hostPath:
            path: /lib/modules