---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        # Mark the pod as critical for rescheduling (has no effect starting with Kubernetes 1.12).
        kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
{% if calico_felix_prometheusmetricsenabled %}
        prometheus.io/scrape: 'true'
        prometheus.io/port: "{{ calico_felix_prometheusmetricsport }}"
{% endif %}
    spec:
      priorityClassName: system-node-critical
      hostNetwork: true
      serviceAccountName: calico-node
      tolerations:
        - operator: Exists
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
{% if calico_version is version('v3.4.0', '>=') %}
      initContainers:
{% if calico_datastore == "kdd" and calico_version is version('v3.6.0', '>=') %}
        # This container performs upgrade from host-local IPAM to calico-ipam.
        # It can be deleted if this is a fresh installation, or if you have already
        # upgraded to use calico-ipam.
        - name: upgrade-ipam
          image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }}
          command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
          env:
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
          volumeMounts:
            - mountPath: /var/lib/cni/networks
              name: host-local-net-dir
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
          securityContext:
            privileged: true
{% endif %}
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }}
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # Install CNI binaries
            - name: UPDATE_CNI_BINARIES
              value: "true"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG_FILE
              value: "/host/etc/cni/net.d/calico.conflist.template"
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
{% if calico_datastore == "kdd" %}
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
{% endif %}
          volumeMounts:
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
          securityContext:
            privileged: true
{% endif %}
      containers:
{% if calico_version is version('v3.3.0', '>=') and calico_version is version('v3.4.0', '<') %}
        - name: install-cni
          image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }}
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # Install CNI binaries
            - name: UPDATE_CNI_BINARIES
              value: "true"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG_FILE
              value: "/host/etc/cni/net.d/calico.conflist.template"
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
{% endif %}
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: {{ calico_node_image_repo }}:{{ calico_node_image_tag }}
          env:
            # The location of the Calico etcd cluster.
{% if calico_datastore == "etcd" %}
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
{% elif calico_datastore == "kdd" %}
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
{% if typha_enabled %}
            # Typha support: controlled by the ConfigMap.
            - name: FELIX_TYPHAK8SSERVICENAME
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: typha_service_name
{% if typha_secure %}
            - name: FELIX_TYPHACN
              value: typha-server
            - name: FELIX_TYPHACAFILE
              value: /etc/typha-ca/ca.crt
            - name: FELIX_TYPHACERTFILE
              value: /etc/typha-client/typha-client.crt
            - name: FELIX_TYPHAKEYFILE
              value: /etc/typha-client/typha-client.key
{% endif %}
{% endif %}
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
{% endif %}
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type.
            - name: CLUSTER_TYPE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cluster_type
            # Set noderef for node controller.
            - name: CALICO_K8S_NODE_REF
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Default action Felix applies to traffic from workload endpoints to the host (RETURN unless overridden).
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "{{ calico_endpoint_to_host_action|default('RETURN') }}"
            - name: FELIX_HEALTHHOST
              value: "{{ calico_healthhost }}"
{% if kube_proxy_mode == 'ipvs' and kube_apiserver_node_port_range is defined %}
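            # Tell Felix which port ranges are Kubernetes NodePorts (needed when kube-proxy runs in IPVS mode).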
            - name: FELIX_KUBENODEPORTRANGES
              value: "{{ kube_apiserver_node_port_range.split('-')[0] }}:{{ kube_apiserver_node_port_range.split('-')[1] }}"
{% endif %}
{% if calico_version is version('v3.8.1', '>=') %}
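            # Choose which iptables backend (legacy or nft) Felix programs.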
            - name: FELIX_IPTABLESBACKEND
              value: "{{ calico_iptables_backend }}"
{% endif %}
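            # How long Felix waits to acquire the iptables lock before giving up.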
            - name: FELIX_IPTABLESLOCKTIMEOUTSECS
              value: "{{ calico_iptables_lock_timeout_secs }}"
            # should be set in etcd before deployment
            # # Configure the IP Pool from which Pod IPs will be chosen.
            # - name: CALICO_IPV4POOL_CIDR
            #   value: "{{ calico_pool_cidr | default(kube_pods_subnet) }}"
{% if calico_veth_mtu is defined %}
            # Set MTU for the Wireguard tunnel device.
            - name: FELIX_WIREGUARDMTU
              value: "{{ calico_veth_mtu }}"
{% endif %}
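            # Enable IPIP encapsulation for the default IPv4 pool.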
            - name: CALICO_IPV4POOL_IPIP
              value: "{{ calico_ipv4pool_ipip }}"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set the Felix log level.
            - name: FELIX_LOGSEVERITYSCREEN
              value: "{{ calico_loglevel }}"
            # Enable or disable usage reporting.
            - name: FELIX_USAGEREPORTINGENABLED
              value: "{{ calico_usage_reporting }}"
            # Set MTU for the tunnel device used if ipip is enabled.
{% if calico_mtu is defined %}
            - name: FELIX_IPINIPMTU
              value: "{{ calico_mtu }}"
{% endif %}
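            # Whether Felix inserts or appends its rules to the top-level iptables chains.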
            - name: FELIX_CHAININSERTMODE
              value: "{{ calico_felix_chaininsertmode }}"
            - name: FELIX_PROMETHEUSMETRICSENABLED
              value: "{{ calico_felix_prometheusmetricsenabled }}"
            - name: FELIX_PROMETHEUSMETRICSPORT
              value: "{{ calico_felix_prometheusmetricsport }}"
            - name: FELIX_PROMETHEUSGOMETRICSENABLED
              value: "{{ calico_felix_prometheusgometricsenabled }}"
            - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
              value: "{{ calico_felix_prometheusprocessmetricsenabled }}"
{% if calico_version is version('v3.4.0', '>=') and calico_advertise_cluster_ips|default(false) %}
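            # Advertise the Kubernetes service cluster IP range over BGP.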
            - name: CALICO_ADVERTISE_CLUSTER_IPS
              value: "{{ kube_service_addresses }}"
{% endif %}
{% if calico_ip_auto_method is defined %}
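            # Auto-detect the node IP address using the configured method.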
            - name: IP_AUTODETECTION_METHOD
              value: "{{ calico_ip_auto_method }}"
            - name: IP
              value: "autodetect"
{% else %}
            - name: IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
{% endif %}
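            # Set the node name used by Calico to the Kubernetes node name.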
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
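            # Enable the Felix health endpoint used by the probes below.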
            - name: FELIX_HEALTHENABLED
              value: "true"
            - name: FELIX_IGNORELOOSERPF
              value: "{{ calico_node_ignorelooserpf }}"
          securityContext:
            privileged: true
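          # Resource requests and limits for the calico/node container.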
          resources:
            limits:
              cpu: {{ calico_node_cpu_limit }}
              memory: {{ calico_node_memory_limit }}
            requests:
              cpu: {{ calico_node_cpu_requests }}
              memory: {{ calico_node_memory_requests }}
          livenessProbe:
{% if calico_version is version('v3.8.0', '<') %}
            httpGet:
              host: 127.0.0.1
              path: /liveness
              port: 9099
{% else %}
            exec:
              command:
                - /bin/calico-node
                - -felix-live
{% if calico_network_backend|default("bird") == "bird" %}
                - -bird-live
{% endif %}
{% endif %}
            initialDelaySeconds: 5
            failureThreshold: 6
          readinessProbe:
            failureThreshold: 6
{% if calico_version is version('v3.3.0', '<') %}
            httpGet:
              host: 127.0.0.1
              path: /readiness
              port: 9099
{% else %}
            exec:
              command:
                - /bin/calico-node
{% if calico_network_backend|default("bird") == "bird" %}
                - -bird-ready
{% endif %}
                - -felix-ready
{% endif %}
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
{% if calico_datastore == "etcd" %}
            - mountPath: /calico-secrets
              name: etcd-certs
{% endif %}
            - name: xtables-lock
              mountPath: /run/xtables.lock
              readOnly: false
{% if typha_secure %}
            - name: typha-client
              mountPath: /etc/typha-client
              readOnly: true
            - name: typha-cacert
              subPath: ca.crt
              mountPath: /etc/typha-ca/ca.crt
              readOnly: true
{% endif %}
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        # Used to install CNI.
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
{% if calico_datastore == "etcd" %}
        # Mount in the etcd TLS secrets.
        - name: etcd-certs
          hostPath:
            path: "{{ calico_cert_dir }}"
{% endif %}
        # Mount the global iptables lock file, used by calico/node
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
{% if calico_datastore == "kdd" and calico_version is version('v3.6.0', '>=') %}
        # Mount in the directory for host-local IPAM allocations. This is
        # used when upgrading from host-local to calico-ipam, and can be removed
        # if not using the upgrade-ipam init container.
        - name: host-local-net-dir
          hostPath:
            path: /var/lib/cni/networks
{% endif %}
{% if typha_enabled and typha_secure %}
        - name: typha-client
          secret:
            secretName: typha-client
            items:
              - key: tls.crt
                path: typha-client.crt
              - key: tls.key
                path: typha-client.key
        - name: typha-cacert
          hostPath:
            path: "/etc/kubernetes/ssl/"
{% endif %}
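  # Control how many calico-node pods may be unavailable at once during a rolling update.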
  updateStrategy:
    rollingUpdate:
      maxUnavailable: {{ serial | default('20%') }}
    type: RollingUpdate