Tune dnsmasq/kubedns limits, replicas, logging

* Add dns_replicas, dns_memory/cpu_limit/requests vars for
  DNS-related apps.
* When kube_log_level=4, log dnsmasq queries as well.
* Add log level control for skydns (part of kubedns app).
* Add limits/requests vars for dnsmasq (part of kubedns app) and
  dnsmasq daemon set.
* Drop string defaults for kube_log_level as it is int and
  is defined in the global vars as well.
* Add docs

Signed-off-by: Bogdan Dobrelya <bdobrelia@mirantis.com>
This commit is contained in:
Bogdan Dobrelya 2016-11-25 11:33:39 +01:00
parent 6d29a5981c
commit 2d18e19263
11 changed files with 47 additions and 12 deletions

View file

@@ -21,5 +21,10 @@ For a large scaled deployments, consider the following configuration changes:
load on a delegate (the first K8s master node) then retrying failed
push or download operations.
* Tune parameters for DNS related applications (dnsmasq daemon set, kubedns
replication controller). Those are ``dns_replicas``, ``dns_cpu_limit``,
``dns_cpu_requests``, ``dns_memory_limit``, ``dns_memory_requests``.
Please note that limits must always be greater than or equal to requests.
For example, when deploying 200 nodes, you may want to run ansible with
``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.

View file

@@ -27,3 +27,9 @@ skip_dnsmasq: false
# Skip setting up dnsmasq daemonset
skip_dnsmasq_k8s: "{{ skip_dnsmasq }}"
# Limits for dnsmasq/kubedns apps
dns_cpu_limit: 100m
dns_memory_limit: 170Mi
dns_cpu_requests: 70m
dns_memory_requests: 70Mi

View file

@@ -20,6 +20,9 @@ server=169.254.169.254
server=8.8.4.4
{% endif %}
{% if kube_log_level == 4 %}
log-queries
{% endif %}
bogus-priv
no-resolv
no-negcache

View file

@@ -29,8 +29,11 @@ spec:
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 100m
memory: 256M
cpu: {{ dns_cpu_limit }}
memory: {{ dns_memory_limit }}
requests:
cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }}
ports:
- name: dns
containerPort: 53

View file

@@ -3,6 +3,13 @@ kubedns_version: 1.7
kubednsmasq_version: 1.3
exechealthz_version: 1.1
# Limits for dnsmasq/kubedns apps
dns_cpu_limit: 100m
dns_memory_limit: 170Mi
dns_cpu_requests: 70m
dns_memory_requests: 70Mi
dns_replicas: 1
# Images
kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
kubedns_image_tag: "{{ kubedns_version }}"

View file

@@ -8,7 +8,7 @@ metadata:
version: v19
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
replicas: {{ dns_replicas }}
selector:
k8s-app: kubedns
version: v19
@@ -29,11 +29,11 @@ spec:
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
cpu: 100m
memory: 170Mi
cpu: {{ dns_cpu_limit }}
memory: {{ dns_memory_limit }}
requests:
cpu: 100m
memory: 70Mi
cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }}
livenessProbe:
httpGet:
path: /healthz
@@ -56,6 +56,7 @@ spec:
# command = "/kube-dns"
- --domain={{ dns_domain }}.
- --dns-port=10053
- --v={{ kube_log_level }}
ports:
- containerPort: 10053
name: dns-local
@@ -66,11 +67,21 @@ spec:
- name: dnsmasq
image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ dns_cpu_limit }}
memory: {{ dns_memory_limit }}
requests:
cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }}
args:
- --log-facility=-
- --cache-size=1000
- --no-resolv
- --server=127.0.0.1#10053
{% if kube_log_level == 4 %}
- --log-queries
{% endif %}
ports:
- containerPort: 53
name: dns

View file

@@ -41,7 +41,7 @@ spec:
{% if enable_network_policy is defined and enable_network_policy == True %}
- --runtime-config=extensions/v1beta1/networkpolicies=true
{% endif %}
- --v={{ kube_log_level | default('2') }}
- --v={{ kube_log_level }}
- --allow-privileged=true
{% if cloud_provider is defined and cloud_provider == "openstack" %}
- --cloud-provider={{ cloud_provider }}

View file

@@ -19,7 +19,7 @@ spec:
- --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
- --root-ca-file={{ kube_cert_dir }}/ca.pem
- --enable-hostpath-provisioner={{ kube_hostpath_dynamic_provisioner }}
- --v={{ kube_log_level | default('2') }}
- --v={{ kube_log_level }}
{% if cloud_provider is defined and cloud_provider == "openstack" %}
- --cloud-provider={{cloud_provider}}
- --cloud-config={{ kube_config_dir }}/cloud_config

View file

@@ -16,7 +16,7 @@ spec:
- scheduler
- --leader-elect=true
- --master={{ kube_apiserver_endpoint }}
- --v={{ kube_log_level | default('2') }}
- --v={{ kube_log_level }}
livenessProbe:
httpGet:
host: 127.0.0.1

View file

@@ -5,7 +5,7 @@ KUBE_LOGGING="--log-dir={{ kube_log_dir }} --logtostderr=true"
# logging to stderr means we get it in the systemd journal
KUBE_LOGGING="--logtostderr=true"
{% endif %}
KUBE_LOG_LEVEL="--v={{ kube_log_level | default('2') }}"
KUBE_LOG_LEVEL="--v={{ kube_log_level }}"
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address={{ ip | default("0.0.0.0") }}"
# The port for the info server to serve on

View file

@@ -14,7 +14,7 @@ spec:
command:
- /hyperkube
- proxy
- --v={{ kube_log_level | default('2') }}
- --v={{ kube_log_level }}
- --master={{ kube_apiserver_endpoint }}
{% if not is_kube_master %}
- --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml