Merge pull request #1567 from mattymo/tolerations

Enable scheduling of critical pods and network plugins on master
This commit is contained in:
Brad Beam 2017-08-24 08:40:41 -05:00 committed by GitHub
commit af211b3d71
11 changed files with 48 additions and 18 deletions

View file

@@ -31,20 +31,23 @@ spec:
      scheduler.alpha.kubernetes.io/critical-pod: ''
      scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
+     tolerations:
+       - effect: NoSchedule
+         operator: Exists
      containers:
        - name: autoscaler
          image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1
          resources:
            requests:
              cpu: "20m"
              memory: "10Mi"
          command:
            - /cluster-proportional-autoscaler
            - --namespace=kube-system
            - --configmap=dnsmasq-autoscaler
            - --target=Deployment/dnsmasq
            # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
            # If using small nodes, "nodesPerReplica" should dominate.
            - --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}}
            - --logtostderr=true
            - --v={{ kube_log_level }}

View file

@@ -21,6 +21,9 @@ spec:
        kubernetes.io/cluster-service: "true"
        kubespray/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}"
    spec:
+     tolerations:
+       - effect: NoSchedule
+         operator: Exists
      containers:
        - name: dnsmasq
          image: "{{ dnsmasq_image_repo }}:{{ dnsmasq_image_tag }}"

View file

@@ -29,11 +29,15 @@ spec:
        k8s-app: kubedns-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
+       scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      containers:
        - name: autoscaler
          image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}"
+     tolerations:
+       - effect: NoSchedule
+         operator: Exists
+       # NOTE(review): "CriticalAddonsOnly" is a taint key, not an effect, and
+       # operator values are case-sensitive — fixed from the original's invalid
+       # `- effect: CriticalAddonsOnly / operator: exists`.
+       - key: CriticalAddonsOnly
+         operator: "Exists"
          resources:
            requests:
              cpu: "20m"

View file

@@ -30,6 +30,8 @@ spec:
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
+       - effect: NoSchedule
+         operator: Exists
      volumes:
        - name: kube-dns-config
          configMap:

View file

@@ -12,6 +12,9 @@ spec:
      labels:
        app: netchecker-agent
    spec:
+     tolerations:
+       - effect: NoSchedule
+         operator: Exists
      containers:
        - name: netchecker-agent
          image: "{{ agent_img }}"

View file

@@ -16,6 +16,9 @@ spec:
{% if kube_version | version_compare('v1.6', '>=') %}
      dnsPolicy: ClusterFirstWithHostNet
{% endif %}
+     tolerations:
+       - effect: NoSchedule
+         operator: Exists
      containers:
        - name: netchecker-agent
          image: "{{ agent_img }}"

View file

@@ -17,6 +17,9 @@ spec:
        kubernetes.io/cluster-service: "true"
        version: "v{{ fluentd_version }}"
    spec:
+     tolerations:
+       - effect: NoSchedule
+         operator: Exists
      containers:
        - name: fluentd-es
          image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}"

View file

@@ -21,6 +21,9 @@ spec:
        k8s-app: calico-policy
    spec:
      hostNetwork: true
+     tolerations:
+       - effect: NoSchedule
+         operator: Exists
      containers:
        - name: calico-policy-controller
          image: {{ calico_policy_image_repo }}:{{ calico_policy_image_tag }}

View file

@@ -18,6 +18,9 @@ spec:
        k8s-app: canal-node
    spec:
      hostNetwork: true
+     tolerations:
+       - effect: NoSchedule
+         operator: Exists
      volumes:
        # Used by calico/node.
        - name: lib-modules

View file

@@ -8,6 +8,9 @@ metadata:
        app: "flannel"
        version: "v0.1"
    spec:
+     tolerations:
+       - effect: NoSchedule
+         operator: Exists
      volumes:
        - name: "subnetenv"
          hostPath: