Make kubedns up to date
Update the kube-dns version to 1.14.2; see https://github.com/kubernetes/kubernetes/pull/45684.
parent d2b793057e
commit d5516a4ca9
5 changed files with 103 additions and 65 deletions
@@ -1,23 +1,20 @@
# Versions
-kubedns_version: 1.9
-kubednsmasq_version: 1.3
-exechealthz_version: 1.1
+kubedns_version: 1.14.2

# Limits for dnsmasq/kubedns apps
dns_cpu_limit: 100m
dns_memory_limit: 170Mi
-dns_cpu_requests: 70m
-dns_memory_requests: 50Mi
+dns_cpu_requests: 100m
+dns_memory_requests: 70Mi
kubedns_min_replicas: 1
kubedns_nodes_per_replica: 10

# Images
-kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
+kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
kubedns_image_tag: "{{ kubedns_version }}"
-kubednsmasq_image_repo: "gcr.io/google_containers/kube-dnsmasq-amd64"
-kubednsmasq_image_tag: "{{ kubednsmasq_version }}"
-exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64"
-exechealthz_image_tag: "{{ exechealthz_version }}"
+dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64"
+dnsmasq_nanny_image_tag: "{{ kubedns_version }}"
+dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64"
+dnsmasq_sidecar_image_tag: "{{ kubedns_version }}"

# Netchecker
deploy_netchecker: false
@@ -40,3 +37,4 @@ netchecker_server_memory_requests: 64M
# SSL
etcd_cert_dir: "/etc/ssl/etcd/ssl"
canal_cert_dir: "/etc/canal/certs"
+
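These defaults are ordinary Ansible role variables, so a deployment can pin or tune them from its inventory instead of patching the role. A minimal sketch of such an override, assuming a conventional Kargo-style group_vars file (the path is illustrative; the variable names are the ones defined above):

# Illustrative inventory override, e.g. inventory/group_vars/k8s-cluster.yml
kubedns_version: 1.14.2        # pin the kube-dns image tag explicitly
dns_memory_limit: 200Mi        # give the DNS containers extra headroom
dns_cpu_requests: 100m
dns_memory_requests: 70Mi
kubedns_min_replicas: 2        # never run fewer than two kube-dns replicas
kubedns_nodes_per_replica: 8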
@@ -13,8 +13,8 @@
    src: "{{item.file}}"
    dest: "{{kube_config_dir}}/{{item.file}}"
  with_items:
-    - {name: kubedns, file: kubedns-deploy.yml, type: deployment}
-    - {name: kubedns, file: kubedns-svc.yml, type: svc}
+    - {name: kube-dns, file: kubedns-deploy.yml, type: deployment}
+    - {name: kube-dns, file: kubedns-svc.yml, type: svc}
    - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment}
  register: manifests
  when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
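For context, the registered `manifests` variable is what later tasks use to decide what to (re)apply. A hypothetical follow-up task, not part of this commit, showing the usual Ansible pattern of looping over registered loop results (the kubectl invocation is illustrative):

# Hypothetical follow-up task (illustrative only).
- name: Kubernetes Apps | Apply rendered kube-dns manifests
  command: "kubectl apply -f {{ kube_config_dir }}/{{ item.item.file }}"
  with_items: "{{ manifests.results }}"
  when: item.changed and inventory_hostname == groups['kube-master'][0]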
@@ -42,7 +42,7 @@ spec:
        - --namespace=kube-system
        - --configmap=kubedns-autoscaler
        # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
-        - --target=Deployment/kubedns
+        - --target=Deployment/kube-dns
        - --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
        - --logtostderr=true
        - --v=2
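The autoscaler target must match the renamed Deployment because the cluster-proportional-autoscaler patches that object's replica count directly. In linear mode it computes roughly max(ceil(nodes / nodesPerReplica), min) replicas and keeps its parameters in the kubedns-autoscaler ConfigMap named above. A sketch of that ConfigMap as it would be seeded from --default-params with the role defaults (nodesPerReplica=10, min=1); the rendering is illustrative:

# Illustrative ConfigMap seeded from --default-params with the role defaults.
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubedns-autoscaler
  namespace: kube-system
data:
  linear: '{"nodesPerReplica":10,"min":1}'
# Example: a 25-node cluster yields max(ceil(25/10), 1) = 3 kube-dns replicas.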
@@ -1,25 +1,39 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
-  name: kubedns
-  namespace: {{ system_namespace }}
+  name: kube-dns
+  namespace: "{{system_namespace}}"
  labels:
-    k8s-app: kubedns
-    version: v19
+    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
spec:
-  replicas: {{ kubedns_min_replicas }}
+  # replicas: not specified here:
+  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
+  # 2. Default is 1.
+  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
+  strategy:
+    rollingUpdate:
+      maxSurge: 10%
+      maxUnavailable: 0
  selector:
    matchLabels:
-      k8s-app: kubedns
-      version: v19
+      k8s-app: kube-dns
  template:
    metadata:
      labels:
-        k8s-app: kubedns
-        version: v19
-        kubernetes.io/cluster-service: "true"
+        k8s-app: kube-dns
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
+      tolerations:
+      - key: "CriticalAddonsOnly"
+        operator: "Exists"
+      volumes:
+      - name: kube-dns-config
+        configMap:
+          name: kube-dns
+          optional: true
      containers:
      - name: kubedns
        image: "{{ kubedns_image_repo }}:{{ kubedns_image_tag }}"
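The new optional kube-dns-config volume is how operators feed custom resolution rules to kube-dns 1.14.x: the kubedns container reads it through --config-dir and the dnsmasq nanny watches the same data. A sketch of such a ConfigMap using the stubDomains/upstreamNameservers keys documented for kube-dns 1.14.x; the stub domain and resolver IPs are placeholders:

# Optional ConfigMap consumed through the kube-dns-config volume above.
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
data:
  stubDomains: |
    {"corp.example.com": ["10.150.0.1"]}
  upstreamNameservers: |
    ["8.8.8.8", "8.8.4.4"]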
@@ -30,15 +44,14 @@ spec:
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
-            cpu: {{ dns_cpu_limit }}
            memory: {{ dns_memory_limit }}
          requests:
            cpu: {{ dns_cpu_requests }}
            memory: {{ dns_memory_requests }}
        livenessProbe:
          httpGet:
-            path: /healthz
-            port: 8080
+            path: /healthcheck/kubedns
+            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
@@ -51,13 +64,16 @@ spec:
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
-          initialDelaySeconds: 30
+          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
-        # command = "/kube-dns"
        - --domain={{ dns_domain }}.
        - --dns-port=10053
+        - --config-dir=/kube-dns-config
        - --v={{ kube_log_level }}
+        env:
+        - name: PROMETHEUS_PORT
+          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
@@ -65,25 +81,36 @@ spec:
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
+        - containerPort: 10055
+          name: metrics
+          protocol: TCP
+        volumeMounts:
+        - name: kube-dns-config
+          mountPath: /kube-dns-config
      - name: dnsmasq
-        image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
+        image: "{{ dnsmasq_nanny_image_repo }}:{{ dnsmasq_nanny_image_tag }}"
        imagePullPolicy: {{ k8s_image_pull_policy }}
-        resources:
-          limits:
-            cpu: {{ dns_cpu_limit }}
-            memory: {{ dns_memory_limit }}
-          requests:
-            cpu: {{ dns_cpu_requests }}
-            memory: {{ dns_memory_requests }}
+        livenessProbe:
+          httpGet:
+            path: /healthcheck/dnsmasq
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
        args:
-        - --log-facility=-
+        - -v={{ kube_log_level }}
+        - -logtostderr
+        - -configDir=/etc/k8s/dns/dnsmasq-nanny
+        - -restartDnsmasq=true
+        - --
+        - -k
        - --cache-size=1000
        - --no-resolv
        - --server=127.0.0.1#10053
{% if kube_log_level == '4' %}
        - --log-queries
{% endif %}
        - --local=/{{ bogus_domains }}
+        - --log-facility=-
+        - --server=/{{ dns_domain }}/127.0.0.1#10053
+        - --server=/in-addr.arpa/127.0.0.1#10053
+        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
@@ -91,26 +118,37 @@ spec:
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
-      - name: healthz
-        image: "{{ exechealthz_image_repo }}:{{ exechealthz_image_tag }}"
-        imagePullPolicy: {{ k8s_image_pull_policy }}
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
-          # keep request = limit to keep this container in guaranteed class
-          limits:
-            cpu: 10m
-            memory: 50Mi
          requests:
-            cpu: 10m
-            # Note that this container shouldn't really need 50Mi of memory. The
-            # limits are set higher than expected pending investigation on #29688.
-            # The extra memory was stolen from the kubedns container to keep the
-            # net memory requested by the pod constant.
-            memory: 50Mi
+            cpu: 150m
+            memory: 20Mi
+        volumeMounts:
+        - name: kube-dns-config
+          mountPath: /etc/k8s/dns/dnsmasq-nanny
+      - name: sidecar
+        image: "{{ dnsmasq_sidecar_image_repo }}:{{ dnsmasq_sidecar_image_tag }}"
+        livenessProbe:
+          httpGet:
+            path: /metrics
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
        args:
-        - -cmd=nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1:10053 >/dev/null
-        - -port=8080
-        - -quiet
+        - --v={{ kube_log_level }}
+        - --logtostderr
+        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ dns_domain }},5,A
+        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ dns_domain }},5,A
        ports:
-        - containerPort: 8080
+        - containerPort: 10054
+          name: metrics
+          protocol: TCP
+        resources:
+          requests:
+            memory: 20Mi
+            cpu: 10m
      dnsPolicy: Default # Don't use cluster DNS.
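Both DNS containers now expose Prometheus metrics: kubedns on port 10055 (set via PROMETHEUS_PORT) and the sidecar on port 10054. A minimal scrape sketch for a standalone Prometheus, using a made-up pod IP as a static target (a real setup would use Kubernetes service discovery instead):

# Illustrative prometheus.yml fragment; 10.233.64.5 is a placeholder pod IP.
scrape_configs:
  - job_name: kube-dns
    static_configs:
      - targets:
          - 10.233.64.5:10055   # kubedns metrics (PROMETHEUS_PORT)
          - 10.233.64.5:10054   # dnsmasq sidecar metrics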
@@ -1,15 +1,16 @@
apiVersion: v1
kind: Service
metadata:
-  name: kubedns
+  name: kube-dns
  namespace: {{ system_namespace }}
  labels:
-    k8s-app: kubedns
+    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
-    kubernetes.io/name: "kubedns"
+    addonmanager.kubernetes.io/mode: Reconcile
+    kubernetes.io/name: "KubeDNS"
spec:
  selector:
-    k8s-app: kubedns
+    k8s-app: kube-dns
  clusterIP: {{ skydns_server }}
  ports:
  - name: dns
@@ -18,3 +19,4 @@ spec:
  - name: dns-tcp
    port: 53
    protocol: TCP
+
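With the Service renamed and reconciled by the addon manager, cluster DNS can be smoke-tested from a throwaway pod. A hedged example; the pod name and busybox tag are illustrative, and cluster.local assumes the default dns_domain:

# Disposable pod that checks the kube-dns Service answers cluster queries.
apiVersion: v1
kind: Pod
metadata:
  name: dns-smoke-test
  namespace: default
spec:
  restartPolicy: Never
  containers:
  - name: dns-smoke-test
    image: busybox:1.28
    command: ["nslookup", "kubernetes.default.svc.cluster.local"]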