c12s-kubespray/roles/kubernetes/node/templates/kubelet.standard.env.j2
woopstar e40368ae2b Add CoreDNS support with various fixes
Added CoreDNS to downloads

Updated with labels. Should now work without RBAC too

Fix DNS settings on hosts

Rename CoreDNS service from kube-dns to coredns

Add the resolv.conf rotate option, based on http://edgeofsanity.net/rant/2017/12/20/systemd-resolved-is-broken.html
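
A hedged Ansible sketch of what adding the rotate option amounts to (the task below is illustrative only, not the exact change from this commit; note the option is removed again in a later change described further down):

# Illustrative only: append "options rotate" so the resolver round-robins between
# the listed nameservers instead of always hitting the first (broken) one.
- name: Ensure resolv.conf rotates between nameservers
  lineinfile:
    path: /etc/resolv.conf
    line: "options rotate"
    state: present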

Updated docs with CoreDNS info

Added labels and fixed minor settings to match the official yaml file: https://github.com/kubernetes/kubernetes/blob/release-1.9/cluster/addons/dns/coredns.yaml.sed

Added a secondary deployment and a secondary service IP. This mitigates DNS timeouts and provides higher resiliency against failures. See the discussion at https://github.com/coreos/coreos-kubernetes/issues/641#issuecomment-281174806
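
The dual setup runs a second, otherwise identical CoreDNS deployment behind its own Service and cluster IP, so a failure or rolling update of one deployment does not take cluster DNS down. A hedged sketch of the secondary Service only (metadata names are placeholders; only the skydns_server_secondary variable is taken from the kubelet template below):

# Illustrative sketch, not the manifest shipped by this commit.
apiVersion: v1
kind: Service
metadata:
  name: coredns-secondary
  namespace: kube-system
  labels:
    k8s-app: coredns-secondary
spec:
  clusterIP: "{{ skydns_server_secondary }}"   # second DNS service IP passed to kubelet
  selector:
    k8s-app: coredns-secondary
  ports:
    - name: dns
      port: 53
      protocol: UDP
    - name: dns-tcp
      port: 53
      protocol: TCP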

Set the DNS list correctly. Thanks to @whereismyjetpack

Only download KubeDNS or CoreDNS if selected
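
A hedged sketch of how a download entry can be gated on the selected dns_mode (the dictionary keys and image variables below are illustrative of the downloads role, not copied from it; the dns_mode values match the kubelet template below):

# Illustrative only: pull the CoreDNS image only when a CoreDNS mode is selected,
# and the KubeDNS image only when a KubeDNS mode is selected.
downloads:
  coredns:
    enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
    container: true
    repo: "{{ coredns_image_repo }}"
    tag: "{{ coredns_image_tag }}"
  kubedns:
    enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
    container: true
    repo: "{{ kubedns_image_repo }}"
    tag: "{{ kubedns_image_tag }}"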

Move dns cleanup to its own file and import tasks based on dns mode
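
A hedged sketch of the conditional imports (the task file names are assumptions, not necessarily the actual layout of the role):

# Illustrative only: pick the task files to run from the selected dns_mode.
- import_tasks: coredns.yml
  when: dns_mode in ['coredns', 'coredns_dual']

- import_tasks: kubedns.yml
  when: dns_mode in ['kubedns', 'dnsmasq_kubedns']

- import_tasks: dns_cleanup.yml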

Fix install of KubeDNS when the dnsmasq_kubedns mode is selected

Add new dns mode coredns_dual for dual-stack deployment. Add a variable to configure the number of replicas deployed. Update docs for dual-stack deployment. Remove the rotate option from resolv.conf.
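
A hedged inventory sketch for the dual-stack mode (the replica-count variable name is a placeholder; only dns_mode and its coredns_dual value come from the template below):

# Illustrative group_vars settings; "dns_replicas" stands in for whatever
# replica-count variable this commit introduces.
dns_mode: coredns_dual
dns_replicas: 2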

Run DNS manifests for CoreDNS and KubeDNS

Set skydns servers on dual stack deployment

Use only one template for CoreDNS dual deployment

Set correct cluster ip for the dns server
2018-03-16 21:51:37 +01:00


# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v={{ kube_log_level }}"
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address={{ kubelet_bind_address }} --node-ip={{ kubelet_address }}"
# The port for the info server to serve on
# KUBELET_PORT="--port=10250"
{% if kube_override_hostname|default('') %}
# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% endif %}
{# Base kubelet args #}
{% set kubelet_args_base %}
--pod-manifest-path={{ kube_manifest_dir }} \
--cadvisor-port={{ kube_cadvisor_port }} \
--pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \
--node-status-update-frequency={{ kubelet_status_update_frequency }} \
--docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \
--client-ca-file={{ kube_cert_dir }}/ca.pem \
--tls-cert-file={{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem \
--tls-private-key-file={{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
--anonymous-auth=false \
--read-only-port={{ kube_read_only_port }} \
{% if kube_version | version_compare('v1.6', '>=') %}
{# flag got removed with 1.7.0 #}
{% if kube_version | version_compare('v1.7', '<') %}
--enable-cri={{ kubelet_enable_cri }} \
{% endif %}
--cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \
--cgroups-per-qos={{ kubelet_cgroups_per_qos }} \
{% if kube_version | version_compare('v1.8', '<') %}
--experimental-fail-swap-on={{ kubelet_fail_swap_on|default(true) }} \
{% else %}
--fail-swap-on={{ kubelet_fail_swap_on|default(true) }} \
{% endif %}
{% if kubelet_authentication_token_webhook %}
--authentication-token-webhook \
{% endif %}
{% if kubelet_authorization_mode_webhook %}
--authorization-mode=Webhook \
{% endif %}
--enforce-node-allocatable={{ kubelet_enforce_node_allocatable }} {% endif %}{% endset %}
{# DNS settings for kubelet #}
{% if dns_mode in ['kubedns', 'coredns'] %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }}{% endset %}
{% elif dns_mode == 'coredns_dual' %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }},{{ skydns_server_secondary }}{% endset %}
{% elif dns_mode == 'dnsmasq_kubedns' %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ dnsmasq_dns_server }}{% endset %}
{% elif dns_mode == 'manual' %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ manual_dns_server }}{% endset %}
{% else %}
{% set kubelet_args_cluster_dns %}{% endset %}
{% endif %}
{% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster-domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %}
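{# Example rendered value (illustrative only, not guaranteed defaults): with dns_mode=coredns_dual this becomes
   something like --cluster-dns=10.233.0.3,10.233.0.4 --cluster-domain=cluster.local --resolv-conf=/etc/resolv.conf #}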
{# Location of the apiserver #}
{% if kube_version | version_compare('v1.8', '<') %}
{% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir }}/node-kubeconfig.yaml --require-kubeconfig{% endset %}
{% else %}
{% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir }}/node-kubeconfig.yaml{% endset %}
{% endif %}
{% if standalone_kubelet|bool %}
{# We are on a master-only host. Make the master unschedulable in this case. #}
{% if kube_version | version_compare('v1.6', '>=') %}
{# Set taints on the master so that it's unschedulable by default. Use node-role.kubernetes.io/master taint like kubeadm. #}
{% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-with-taints=node-role.kubernetes.io/master=:NoSchedule{% endset %}
{% else %}
{# --register-with-taints was added in 1.6 so just register unschedulable if Kubernetes < 1.6 #}
{% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-schedulable=false{% endset %}
{% endif %}
{% endif %}
{# Node reserved CPU/memory #}
{% if is_kube_master|bool %}
{% set kube_reserved %}--kube-reserved cpu={{ kube_master_cpu_reserved }},memory={{ kube_master_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
{% else %}
{% set kube_reserved %}--kube-reserved cpu={{ kube_cpu_reserved }},memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
{% endif %}
{# Kubelet node labels #}
{% if inventory_hostname in groups['kube-master'] %}
{% set node_labels %}--node-labels=node-role.kubernetes.io/master=true{% endset %}
{% if not standalone_kubelet|bool %}
{% set node_labels %}{{ node_labels }},node-role.kubernetes.io/node=true{% endset %}
{% endif %}
{% else %}
{% set node_labels %}--node-labels=node-role.kubernetes.io/node=true{% endset %}
{% endif %}
KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} {{ node_labels }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{ kubelet_custom_flags }} {% else %}{% for flag in kubelet_custom_flags %} {{ flag }} {% endfor %}{% endif %}"
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium"] %}
KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
{% elif kube_network_plugin is defined and kube_network_plugin == "weave" %}
DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock"
{% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %}
KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"
{% endif %}
{% if kubelet_flexvolumes_plugins_dir is defined %}
KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"
{% endif %}
# Should this cluster be allowed to run privileged docker containers?
KUBE_ALLOW_PRIV="--allow-privileged=true"
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
{% elif cloud_provider is defined and cloud_provider == "aws" %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}"
{% else %}
KUBELET_CLOUDPROVIDER=""
{% endif %}
PATH={{ bin_dir }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin