c12s-kubespray/roles/kubernetes/node/templates/kubelet.standard.env.j2
Christopher J. Ruwe c1bc4615fe assert that number of pods on node does not exceed CIDR address range
The number of pods on a given node is determined by the --max-pods=k
directive. When the address space is exhausted, no more pods can be
scheduled even if, from the --max-pods perspective, the node still has
capacity.

The special case of a pod that is scheduled and uses the node IP in the
host network namespace is too "soft" to derive a guarantee from.

Comparing kubelet_max_pods with kube_network_node_prefix, when the
latter is given, makes it possible to assert that the pod limit matches
the CIDR address space.
2018-05-16 11:55:46 +00:00
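
The check amounts to simple arithmetic: a kube_network_node_prefix of 24 leaves
2^(32 - 24) - 2 = 254 usable addresses per node, so kubelet_max_pods must not
exceed 254 (the common defaults, 110 pods against a /24 prefix, fit
comfortably). A minimal sketch of such a pre-flight check as an Ansible assert
task follows; the task name and message are hypothetical, and the actual
Kubespray check may differ:

- name: "Assert that kubelet_max_pods fits into the per-node pod CIDR"
  assert:
    that:
      # a /N prefix leaves 2^(32 - N) - 2 usable addresses on the node
      - kubelet_max_pods | int <= (2 ** (32 - kube_network_node_prefix | int)) - 2
    msg: "kubelet_max_pods exceeds the address space of the per-node pod CIDR"
  when: kube_network_node_prefix is defined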

# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v={{ kube_log_level }}"
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address={{ kubelet_bind_address }} --node-ip={{ kubelet_address }}"
# The port for the info server to serve on
# KUBELET_PORT="--port=10250"
{% if kube_override_hostname|default('') %}
# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% endif %}
{# Base kubelet args #}
{% set kubelet_args_base %}
--pod-manifest-path={{ kube_manifest_dir }} \
--cadvisor-port={{ kube_cadvisor_port }} \
--pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \
--node-status-update-frequency={{ kubelet_status_update_frequency }} \
--docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \
--client-ca-file={{ kube_cert_dir }}/ca.pem \
--tls-cert-file={{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem \
--tls-private-key-file={{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
--anonymous-auth=false \
--read-only-port={{ kube_read_only_port }} \
{% if kube_version | version_compare('v1.6', '>=') %}
{# flag got removed with 1.7.0 #}
{% if kube_version | version_compare('v1.7', '<') %}
--enable-cri={{ kubelet_enable_cri }} \
{% endif %}
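{# The cgroup driver must match the container runtime's; kubelet_cgroup_driver_detected is the auto-detected fallback when kubelet_cgroup_driver is unset #}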
--cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \
--cgroups-per-qos={{ kubelet_cgroups_per_qos }} \
--max-pods={{ kubelet_max_pods }} \
{% if kube_version | version_compare('v1.8', '<') %}
--experimental-fail-swap-on={{ kubelet_fail_swap_on|default(true) }} \
{% else %}
--fail-swap-on={{ kubelet_fail_swap_on|default(true) }} \
{% endif %}
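{# Optional webhook-based token authentication and authorization against the apiserver #}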
{% if kubelet_authentication_token_webhook %}
--authentication-token-webhook \
{% endif %}
{% if kubelet_authorization_mode_webhook %}
--authorization-mode=Webhook \
{% endif %}
--enforce-node-allocatable={{ kubelet_enforce_node_allocatable }} {% endif %}{% endset %}
{# DNS settings for kubelet #}
{% if dns_mode in ['kubedns', 'coredns'] %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }}{% endset %}
{% elif dns_mode == 'coredns_dual' %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }},{{ skydns_server_secondary }}{% endset %}
{% elif dns_mode == 'dnsmasq_kubedns' %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ dnsmasq_dns_server }}{% endset %}
{% elif dns_mode == 'manual' %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ manual_dns_server }}{% endset %}
{% else %}
{% set kubelet_args_cluster_dns %}{% endset %}
{% endif %}
{% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster-domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %}
{# Location of the apiserver #}
{% if kube_version | version_compare('v1.8', '<') %}
{% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir }}/node-kubeconfig.yaml --require-kubeconfig{% endset %}
{% else %}
{% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir }}/node-kubeconfig.yaml{% endset %}
{% endif %}
{% if standalone_kubelet|bool %}
{# We are on a master-only host. Make the master unschedulable in this case. #}
{% if kube_version | version_compare('v1.6', '>=') %}
{# Set taints on the master so that it's unschedulable by default. Use node-role.kubernetes.io/master taint like kubeadm. #}
{% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-with-taints=node-role.kubernetes.io/master=:NoSchedule{% endset %}
{% else %}
{# --register-with-taints was added in 1.6 so just register unschedulable if Kubernetes < 1.6 #}
{% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-schedulable=false{% endset %}
{% endif %}
{% endif %}
{# Node reserved CPU/memory #}
{% if is_kube_master|bool %}
{% set kube_reserved %}--kube-reserved cpu={{ kube_master_cpu_reserved }},memory={{ kube_master_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
{% else %}
{% set kube_reserved %}--kube-reserved cpu={{ kube_cpu_reserved }},memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
{% endif %}
{# Kubelet node labels #}
{% set role_node_labels = [] %}
{% if inventory_hostname in groups['kube-master'] %}
{% set dummy = role_node_labels.append('node-role.kubernetes.io/master=true') %}
{% if not standalone_kubelet|bool %}
{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
{% endif %}
{% else %}
{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
{% endif %}
{% if inventory_hostname in groups['kube-ingress']|default([]) %}
{% set dummy = role_node_labels.append('node-role.kubernetes.io/ingress=true') %}
{% endif %}
{% set inventory_node_labels = [] %}
{% if node_labels is defined %}
{% for labelname, labelvalue in node_labels.items() %}
{% set dummy = inventory_node_labels.append('%s=%s'|format(labelname, labelvalue)) %}
{% endfor %}
{% endif %}
{% set all_node_labels = role_node_labels + inventory_node_labels %}
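{# Assemble the final kubelet argument string from the fragments above #}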
KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} --node-labels={{ all_node_labels | join(',') }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
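{# Network plugin flags: the CNI-based plugins share the same settings; "cloud" uses kubenet #}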
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium"] %}
KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
{% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %}
KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"
{% endif %}
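# Directory in which kubelet searches for additional (flexvolume) volume plugins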
KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=true"
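{# Cloud provider integration: openstack/azure/vsphere also require a cloud config file #}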
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
{% elif cloud_provider is defined and cloud_provider == "aws" %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}"
{% else %}
KUBELET_CLOUDPROVIDER=""
{% endif %}
PATH={{ bin_dir }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin