c12s-kubespray/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cilium
  namespace: kube-system
spec:
  updateStrategy:
    type: "RollingUpdate"
    rollingUpdate:
      # Specifies the maximum number of Pods that can be unavailable during the update process.
      # The current default is 1 or 100% for daemonsets; an explicit value is set here
      # to avoid confusion, as the default is specific to the workload type (daemonset/deployment).
      maxUnavailable: "100%"
  selector:
    matchLabels:
      k8s-app: cilium
      kubernetes.io/cluster-service: "true"
  template:
    metadata:
      labels:
        k8s-app: cilium
        kubernetes.io/cluster-service: "true"
      annotations:
        # This annotation plus the CriticalAddonsOnly toleration marks cilium
        # as a critical pod in the cluster, which ensures it gets priority
        # scheduling.
        # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
        scheduler.alpha.kubernetes.io/critical-pod: ''
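        # The scrape annotations below are only rendered when Prometheus support is
        # enabled for cilium (e.g. cilium_enable_prometheus: true in the inventory group_vars).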
{% if cilium_enable_prometheus %}
        prometheus.io/scrape: "true"
        prometheus.io/port: "9090"
{% endif %}
    spec:
{% if kube_version is version('v1.11.1', '>=') %}
      priorityClassName: system-node-critical
{% endif %}
      serviceAccountName: cilium
      initContainers:
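      # The init container wipes any leftover Cilium state (/var/run/cilium/state and the
      # BPF maps pinned under /sys/fs/bpf/tc/globals) when the clean-cilium-state key in
      # the cilium-config ConfigMap is set to "true".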
      - name: clean-cilium-state
        image: docker.io/library/busybox:1.28.4
        imagePullPolicy: IfNotPresent
        command: ['sh', '-c', 'if [ "${CLEAN_CILIUM_STATE}" = "true" ]; then rm -rf /var/run/cilium/state; rm -rf /sys/fs/bpf/tc/globals/cilium_*; fi']
        volumeMounts:
        - name: bpf-maps
          mountPath: /sys/fs/bpf
        - name: cilium-run
          mountPath: /var/run/cilium
        env:
        - name: "CLEAN_CILIUM_STATE"
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              optional: true
              key: clean-cilium-state
      containers:
      - image: {{ cilium_image_repo }}:{{ cilium_image_tag }}
        imagePullPolicy: Always
        name: cilium-agent
        command: ["cilium-agent"]
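        # The agent keeps its state in etcd; the endpoints and TLS settings come from the
        # etcd.config file rendered out of the cilium-config ConfigMap and mounted at
        # /var/lib/etcd-config below.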
        args:
        - "--debug=$(CILIUM_DEBUG)"
        - "--kvstore=etcd"
        - "--kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config"
        - "--disable-ipv4=$(DISABLE_IPV4)"
{% if cilium_enable_prometheus %}
        ports:
        - name: prometheus
          containerPort: 9090
{% endif %}
        lifecycle:
          postStart:
            exec:
              command:
              - "/cni-install.sh"
          preStop:
            exec:
              command:
              - "/cni-uninstall.sh"
        env:
        - name: "K8S_NODE_NAME"
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: "CILIUM_DEBUG"
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: debug
        - name: "DISABLE_IPV4"
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: disable-ipv4
{% if cilium_enable_prometheus %}
        # Note: this variable is a no-op if not defined, and is used in the
        # prometheus examples.
        - name: "CILIUM_PROMETHEUS_SERVE_ADDR"
          valueFrom:
            configMapKeyRef:
              name: cilium-metrics-config
              optional: true
              key: prometheus-serve-addr
{% endif %}
        - name: "CILIUM_LEGACY_HOST_ALLOWS_WORLD"
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              optional: true
              key: legacy-host-allows-world
        - name: "CILIUM_SIDECAR_ISTIO_PROXY_IMAGE"
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: sidecar-istio-proxy-image
              optional: true
        - name: "CILIUM_TUNNEL"
          valueFrom:
            configMapKeyRef:
              key: tunnel
              name: cilium-config
              optional: true
        - name: "CILIUM_MONITOR_AGGREGATION_LEVEL"
          valueFrom:
            configMapKeyRef:
              key: monitor-aggregation-level
              name: cilium-config
              optional: true
        resources:
          limits:
            cpu: {{ cilium_cpu_limit }}
            memory: {{ cilium_memory_limit }}
          requests:
            cpu: {{ cilium_cpu_requests }}
            memory: {{ cilium_memory_requests }}
        livenessProbe:
          exec:
            command:
            - cilium
            - status
          # The initial delay for the liveness probe is intentionally large to
          # avoid an endless kill & restart cycle in the event that the initial
          # bootstrapping takes longer than expected.
          initialDelaySeconds: 120
          failureThreshold: 10
          periodSeconds: 10
        readinessProbe:
          exec:
            command:
            - cilium
            - status
          initialDelaySeconds: 5
          periodSeconds: 5
        volumeMounts:
        - name: bpf-maps
          mountPath: /sys/fs/bpf
        - name: cilium-run
          mountPath: /var/run/cilium
        - name: cni-path
          mountPath: /host/opt/cni/bin
        - name: etc-cni-netd
          mountPath: /host/etc/cni/net.d
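        # Mount the container runtime socket (CRI-O or Docker, depending on container_manager)
        # so the agent can read container events on the node.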
{% if container_manager == 'crio' %}
        - name: crio-socket
          mountPath: /var/run/crio.sock
          readOnly: true
{% else %}
        - name: docker-socket
          mountPath: /var/run/docker.sock
          readOnly: true
{% endif %}
        - name: etcd-config-path
          mountPath: /var/lib/etcd-config
          readOnly: true
        - name: cilium-certs
          mountPath: {{ cilium_cert_dir }}
          readOnly: true
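        # The agent needs elevated privileges (NET_ADMIN plus privileged mode) to load BPF
        # programs and configure networking on the host.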
        securityContext:
          capabilities:
            add:
            - "NET_ADMIN"
          privileged: true
      hostNetwork: true
      volumes:
      # To keep state between restarts / upgrades
      - name: cilium-run
        hostPath:
          path: /var/run/cilium
      # To keep state between restarts / upgrades
      - name: bpf-maps
        hostPath:
          path: /sys/fs/bpf
{% if container_manager == 'crio' %}
      # To read crio events from the node
      - name: crio-socket
        hostPath:
          path: /var/run/crio/crio.sock
{% else %}
      # To read docker events from the node
      - name: docker-socket
        hostPath:
          path: /var/run/docker.sock
{% endif %}
      # To install cilium cni plugin in the host
      - name: cni-path
        hostPath:
          path: /opt/cni/bin
      # To install cilium cni configuration in the host
      - name: etc-cni-netd
        hostPath:
          path: /etc/cni/net.d
      # To read the etcd config stored in config maps
      - name: etcd-config-path
        configMap:
          name: cilium-config
          items:
          - key: etcd-config
            path: etcd.config
      # To read the k8s etcd secrets in case the user wants to use TLS
      - name: cilium-certs
        hostPath:
          path: {{ cilium_cert_dir }}
      restartPolicy: Always
      tolerations:
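      # Tolerate every taint (no key specified) so the agent is scheduled on all nodes,
      # including the masters.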
      - operator: Exists
      # Mark pod as critical for rescheduling (will have no effect starting with Kubernetes 1.12)
      - key: CriticalAddonsOnly
        operator: "Exists"