# c12s-kubespray/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml
---
# KubeDNS ReplicationController: runs the cluster DNS pod with three containers —
# kube-dns (SkyDNS successor, serves on 10053), dnsmasq (caching frontend on 53),
# and exechealthz (liveness endpoint on 8080).
# Jinja2 template rendered by Ansible; all {{ ... }} vars come from kubespray DNS defaults.
apiVersion: v1
kind: ReplicationController
metadata:
  name: kubedns
  namespace: "{{ system_namespace }}"
  labels:
    k8s-app: kubedns
    version: v19
    kubernetes.io/cluster-service: "true"
spec:
  # dns_replicas must render to an integer — do not quote.
  replicas: {{ dns_replicas }}
  selector:
    k8s-app: kubedns
    version: v19
  template:
    metadata:
      labels:
        k8s-app: kubedns
        version: v19
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
        - name: kubedns
          image: "{{ kubedns_image_repo }}:{{ kubedns_image_tag }}"
          imagePullPolicy: "{{ k8s_image_pull_policy }}"
          resources:
            # TODO: Set memory limits when we've profiled the container for large
            # clusters, then set request = limit to keep this container in
            # guaranteed class. Currently, this container falls into the
            # "burstable" category so the kubelet doesn't backoff from restarting it.
            limits:
              cpu: {{ dns_cpu_limit }}
              memory: {{ dns_memory_limit }}
            requests:
              cpu: {{ dns_cpu_requests }}
              memory: {{ dns_memory_requests }}
          # Liveness is served by the exechealthz sidecar on 8080 (see below).
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 5
          readinessProbe:
            httpGet:
              path: /readiness
              port: 8081
              scheme: HTTP
            # we poll on pod startup for the Kubernetes master service and
            # only setup the /readiness HTTP server once that's available.
            initialDelaySeconds: 30
            timeoutSeconds: 5
          args:
            # command = "/kube-dns"
            - --domain={{ dns_domain }}.
            - --dns-port=10053
            - --v={{ kube_log_level }}
          ports:
            - containerPort: 10053
              name: dns-local
              protocol: UDP
            - containerPort: 10053
              name: dns-tcp-local
              protocol: TCP
        - name: dnsmasq
          image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
          imagePullPolicy: "{{ k8s_image_pull_policy }}"
          resources:
            limits:
              cpu: {{ dns_cpu_limit }}
              memory: {{ dns_memory_limit }}
            requests:
              cpu: {{ dns_cpu_requests }}
              memory: {{ dns_memory_requests }}
          args:
            # Log to stderr ("-") and forward everything to kube-dns on 10053.
            - --log-facility=-
            - --cache-size=1000
            - --no-resolv
            - --server=127.0.0.1#10053
{% if kube_log_level == 4 %}
            - --log-queries
{% endif %}
          ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
        - name: healthz
          image: "{{ exechealthz_image_repo }}:{{ exechealthz_image_tag }}"
          imagePullPolicy: "{{ k8s_image_pull_policy }}"
          resources:
            # keep request = limit to keep this container in guaranteed class
            limits:
              cpu: 10m
              memory: 50Mi
            requests:
              cpu: 10m
              # Note that this container shouldn't really need 50Mi of memory. The
              # limits are set higher than expected pending investigation on #29688.
              # The extra memory was stolen from the kubedns container to keep the
              # net memory requested by the pod constant.
              memory: 50Mi
          args:
            # Probe both dnsmasq (port 53 via default) and kube-dns (10053) directly.
            - -cmd=nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1:10053 >/dev/null
            - -port=8080
            - -quiet
          ports:
            - containerPort: 8080
              protocol: TCP
      dnsPolicy: Default  # Don't use cluster DNS.