dff78f616e
According to http://kubernetes.io/docs/user-guide/images/ : "By default, the kubelet will try to pull each image from the specified registry. However, if the imagePullPolicy property of the container is set to IfNotPresent or Never, then a local image is used (preferentially or exclusively, respectively)."

Use the IfNotPresent value so that images prepared by the download role dependencies are actually used by the kubelet, instead of pull errors leaving apps stuck in the PullBackOff/Error state even though the images already exist on the local host.

Signed-off-by: Bogdan Dobrelya <bdobrelia@mirantis.com>
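As a minimal sketch of how the policy variable used in the template below could be wired up (the defaults file path and the group_vars override shown here are assumptions for illustration; only the k8s_image_pull_policy name comes from the template itself, and IfNotPresent/Always/Never are the values Kubernetes accepts):

# roles/download/defaults/main.yml (assumed location of the role default)
k8s_image_pull_policy: IfNotPresent

# inventory group_vars/all.yml (assumed per-cluster override for clusters that should always re-pull)
# k8s_image_pull_policy: Always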
apiVersion: v1
kind: ReplicationController
metadata:
  name: kubedns
  namespace: kube-system
  labels:
    k8s-app: kubedns
    version: v19
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kubedns
    version: v19
  template:
    metadata:
      labels:
        k8s-app: kubedns
        version: v19
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: kubedns
        image: "{{ kubedns_image_repo }}:{{ kubedns_image_tag }}"
        imagePullPolicy: {{ k8s_image_pull_policy }}
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            cpu: 100m
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 30
          timeoutSeconds: 5
        args:
        # command = "/kube-dns"
        - --domain={{ dns_domain }}.
        - --dns-port=10053
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
      - name: dnsmasq
        image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
        imagePullPolicy: {{ k8s_image_pull_policy }}
        args:
        - --log-facility=-
        - --cache-size=1000
        - --no-resolv
        - --server=127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
      - name: healthz
        image: "{{ exechealthz_image_repo }}:{{ exechealthz_image_tag }}"
        imagePullPolicy: {{ k8s_image_pull_policy }}
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 10m
            memory: 50Mi
          requests:
            cpu: 10m
            # Note that this container shouldn't really need 50Mi of memory. The
            # limits are set higher than expected pending investigation on #29688.
            # The extra memory was stolen from the kubedns container to keep the
            # net memory requested by the pod constant.
            memory: 50Mi
        args:
        - -cmd=nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1:10053 >/dev/null
        - -port=8080
        - -quiet
        ports:
        - containerPort: 8080
          protocol: TCP
      dnsPolicy: Default  # Don't use cluster DNS.