Upgrade Cilium network plugin to v1.5.5. (#5014)

* Needs an additional cilium-operator deployment.
* Added an option to enable hostPort mappings (see the example below).
Holger Frydrych 2019-08-06 10:37:55 +02:00 committed by Kubernetes Prow Robot
parent 7cf8ad4dc7
commit bc6de32faf
11 changed files with 566 additions and 213 deletions
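
For reference, the new behavior can be switched on from Kubespray group vars. A minimal sketch using only variables that this commit introduces or already relies on (the file path is illustrative):

# inventory/mycluster/group_vars/k8s-cluster/k8s-net-cilium.yml (illustrative path)
kube_network_plugin: cilium

# Enable hostPort support via the portmap CNI chain added by this commit
cilium_enable_portmap: true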

@@ -117,7 +117,7 @@ Supported Components
 - [cni-plugins](https://github.com/containernetworking/plugins) v0.8.1
 - [calico](https://github.com/projectcalico/calico) v3.7.3
 - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-- [cilium](https://github.com/cilium/cilium) v1.3.0
+- [cilium](https://github.com/cilium/cilium) v1.5.5
 - [contiv](https://github.com/contiv/install) v1.2.1
 - [flanneld](https://github.com/coreos/flannel) v0.11.0
 - [kube-router](https://github.com/cloudnativelabs/kube-router) v0.2.5

@@ -73,7 +73,7 @@ cni_version: "v0.8.1"
 weave_version: 2.5.2
 pod_infra_version: 3.1
 contiv_version: 1.2.1
-cilium_version: "v1.3.0"
+cilium_version: "v1.5.5"
 kube_ovn_version: "v0.6.0"
 kube_router_version: "v0.2.5"
 multus_version: "v3.1.autoconf"
@@ -237,8 +237,10 @@ contiv_ovs_image_repo: "docker.io/contiv/ovs"
 contiv_ovs_image_tag: "latest"
 cilium_image_repo: "docker.io/cilium/cilium"
 cilium_image_tag: "{{ cilium_version }}"
-cilium_init_image_repo: "docker.io/library/busybox"
-cilium_init_image_tag: "1.28.4"
+cilium_init_image_repo: "docker.io/cilium/cilium-init"
+cilium_init_image_tag: "2019-04-05"
+cilium_operator_image_repo: "docker.io/cilium/operator"
+cilium_operator_image_tag: "{{ cilium_version }}"
 kube_ovn_db_image_repo: "index.alauda.cn/alaudak8s/kube-ovn-db"
 kube_ovn_node_image_repo: "index.alauda.cn/alaudak8s/kube-ovn-node"
 kube_ovn_cni_image_repo: "index.alauda.cn/alaudak8s/kube-ovn-cni"
@@ -415,6 +417,15 @@ downloads:
     groups:
     - k8s-cluster

+  cilium_operator:
+    enabled: "{{ kube_network_plugin == 'cilium' }}"
+    container: true
+    repo: "{{ cilium_operator_image_repo }}"
+    tag: "{{ cilium_operator_image_tag }}"
+    sha256: "{{ cilium_operator_digest_checksum|default(None) }}"
+    groups:
+    - k8s-cluster
+
   multus:
     enabled: "{{ kube_network_plugin_multus }}"
     container: true
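
Since the operator image repo and tag are ordinary variables, they can be overridden like any other Kubespray download, e.g. to pull from a private registry. A hedged sketch (the registry host is illustrative):

cilium_operator_image_repo: "registry.example.com/cilium/operator"
cilium_operator_image_tag: "v1.5.5"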

@@ -1,7 +1,9 @@
 ---
 # Log-level
 cilium_debug: false
-cilium_disable_ipv4: false
+cilium_enable_ipv4: true
+cilium_enable_ipv6: false

 # Etcd SSL dirs
 cilium_cert_dir: /etc/cilium/certs
@@ -9,9 +11,6 @@ kube_etcd_cacert_file: ca.pem
 kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
 kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem

-# Cilium Network Policy directory
-cilium_policy_dir: /etc/kubernetes/policy
-
 # Limits for apps
 cilium_memory_limit: 500M
 cilium_cpu_limit: 500m
@@ -20,3 +19,12 @@ cilium_cpu_requests: 100m

 # Optional features
 cilium_enable_prometheus: false
+
+# Enable if you want to make use of hostPort mappings
+cilium_enable_portmap: false
+
+# If upgrading from Cilium < 1.5, you may want to override some of these options
+# to prevent service disruptions. See also:
+# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
+cilium_preallocate_bpf_maps: false
+cilium_tofqdns_enable_poller: false
+cilium_enable_legacy_services: false
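
When upgrading an existing cluster from Cilium < 1.5, the upgrade guide referenced above recommends keeping the pre-1.5 behavior until the rollout completes. A minimal sketch of group_vars overrides, using only the variables defined here (revert them once the upgrade is done):

# Preserve pre-1.5 behavior during a rolling upgrade
cilium_preallocate_bpf_maps: true
cilium_tofqdns_enable_poller: true
cilium_enable_legacy_services: true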

@@ -34,6 +34,7 @@
     - {name: cilium, file: cilium-crb.yml, type: clusterrolebinding}
     - {name: cilium, file: cilium-cr.yml, type: clusterrole}
     - {name: cilium, file: cilium-ds.yml, type: ds}
+    - {name: cilium, file: cilium-deploy.yml, type: deploy}
     - {name: cilium, file: cilium-sa.yml, type: sa}
   register: cilium_node_manifests
   when:
@@ -48,7 +49,16 @@
     mode: 0755
   register: cni_bin_dir

-- name: Cilium | Create network policy directory
-  file:
-    path: "{{ cilium_policy_dir }}"
-    state: directory
+- name: Cilium | Copy CNI plugins
+  unarchive:
+    src: "{{ local_release_dir }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
+    dest: "/opt/cni/bin"
+    mode: 0755
+    remote_src: yes
+  when: cilium_enable_portmap
+
+- name: Cilium | Enable portmap addon
+  template:
+    src: 000-cilium-portmap.conflist.j2
+    dest: /etc/cni/net.d/000-cilium-portmap.conflist
+  when: cilium_enable_portmap

@@ -0,0 +1,13 @@
+{
+  "cniVersion": "0.3.1",
+  "name": "cilium-portmap",
+  "plugins": [
+    {
+      "type": "cilium-cni"
+    },
+    {
+      "type": "portmap",
+      "capabilities": { "portMappings": true }
+    }
+  ]
+}
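
With the portmap chain in place, hostPort declarations in pod specs take effect. A minimal illustrative pod (all names are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: hostport-demo
spec:
  containers:
  - name: web
    image: nginx
    ports:
    - containerPort: 80
      hostPort: 8080   # reachable on the node's IP at port 8080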

@@ -25,18 +25,76 @@ data:
   key-file: "{{ cilium_cert_dir }}/key.pem"
   cert-file: "{{ cilium_cert_dir }}/cert.crt"

+  # If you want metrics enabled in all of your Cilium agents, set the port for
+  # which the Cilium agents will have their metrics exposed.
+  # This option deprecates the "prometheus-serve-addr" in the
+  # "cilium-metrics-config" ConfigMap
+  # NOTE that this will open the port on ALL nodes where Cilium pods are
+  # scheduled.
+{% if cilium_enable_prometheus %}
+  prometheus-serve-addr: ":9090"
+{% endif %}
+
   # If you want to run cilium in debug mode change this value to true
   debug: "{{ cilium_debug }}"
-  disable-ipv4: "{{ cilium_disable_ipv4 }}"
-
-  # If you want to clean cilium state; change this value to true
-  clean-cilium-state: "false"
-  legacy-host-allows-world: "false"
+  enable-ipv4: "{{ cilium_enable_ipv4 }}"
+  enable-ipv6: "{{ cilium_enable_ipv6 }}"
+
+  # If a serious issue occurs during Cilium startup, this
+  # invasive option may be set to true to remove all persistent
+  # state. Endpoints will not be restored using knowledge from a
+  # prior Cilium run, so they may receive new IP addresses upon
+  # restart. This also triggers clean-cilium-bpf-state.
+  clean-cilium-state: "false"
+
+  # If you want to clean cilium BPF state, set this to true;
+  # Removes all BPF maps from the filesystem. Upon restart,
+  # endpoints are restored with the same IP addresses, however
+  # any ongoing connections may be disrupted briefly.
+  # Loadbalancing decisions will be reset, so any ongoing
+  # connections via a service may be loadbalanced to a different
+  # backend after restart.
+  clean-cilium-bpf-state: "false"
+
+  # Users who wish to specify their own custom CNI configuration file must set
+  # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
+  custom-cni-conf: "false"

   # If you want cilium monitor to aggregate tracing for packets, set this level
   # to "low", "medium", or "maximum". The higher the level, the fewer packets
   # that will be seen in monitor output.
   monitor-aggregation-level: "none"
+
+  # ct-global-max-entries-* specifies the maximum number of connections
+  # supported across all endpoints, split by protocol: tcp or other. One pair
+  # of maps uses these values for IPv4 connections, and another pair of maps
+  # use these values for IPv6 connections.
+  #
+  # If these values are modified, then during the next Cilium startup the
+  # tracking of ongoing connections may be disrupted. This may lead to brief
+  # policy drops or a change in loadbalancing decisions for a connection.
+  #
+  # For users upgrading from Cilium 1.2 or earlier, to minimize disruption
+  # during the upgrade process, comment out these options.
+  bpf-ct-global-tcp-max: "524288"
+  bpf-ct-global-any-max: "262144"
+
+  # Pre-allocation of map entries allows per-packet latency to be reduced, at
+  # the expense of up-front memory allocation for the entries in the maps. The
+  # default value below will minimize memory usage in the default installation;
+  # users who are sensitive to latency may consider setting this to "true".
+  #
+  # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
+  # this option and behave as though it is set to "true".
+  #
+  # If this value is modified, then during the next Cilium startup the restore
+  # of existing endpoints and tracking of ongoing connections may be disrupted.
+  # This may lead to policy drops or a change in loadbalancing decisions for a
+  # connection for some time. Endpoints may need to be recreated to restore
+  # connectivity.
+  #
+  # If this option is set to "false" during an upgrade from 1.3 or earlier to
+  # 1.4 or later, then it may cause one-time disruptions during the upgrade.
+  preallocate-bpf-maps: "{{cilium_preallocate_bpf_maps}}"

   # Regular expression matching compatible Istio sidecar istio-proxy
   # container image names
   sidecar-istio-proxy-image: "cilium/istio_proxy"
@@ -47,3 +105,38 @@ data:
   # - vxlan (default)
   # - geneve
   tunnel: "vxlan"
+
+  # Name of the cluster. Only relevant when building a mesh of clusters.
+  cluster-name: default
+
+  # Unique ID of the cluster. Must be unique across all connected clusters and
+  # in the range of 1 to 255. Only relevant when building a mesh of clusters.
+  #cluster-id: 1
+
+  # DNS Polling periodically issues a DNS lookup for each `matchName` from
+  # cilium-agent. The result is used to regenerate endpoint policy.
+  # DNS lookups are repeated with an interval of 5 seconds, and are made for
+  # A(IPv4) and AAAA(IPv6) addresses. Should a lookup fail, the most recent IP
+  # data is used instead. An IP change will trigger a regeneration of the Cilium
+  # policy for each endpoint and increment the per cilium-agent policy
+  # repository revision.
+  #
+  # This option is disabled by default starting from version 1.4.x in favor
+  # of a more powerful DNS proxy-based implementation, see [0] for details.
+  # Enable this option if you want to use FQDN policies but do not want to use
+  # the DNS proxy.
+  #
+  # To ease upgrade, users may opt to set this option to "true".
+  # Otherwise please refer to the Upgrade Guide [1] which explains how to
+  # prepare policy rules for upgrade.
+  #
+  # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based
+  # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
+  tofqdns-enable-poller: "{{cilium_tofqdns_enable_poller}}"
+
+  # wait-bpf-mount makes init container wait until bpf filesystem is mounted
+  wait-bpf-mount: "false"
+
+  # Enable legacy services (prior to v1.5) to prevent terminating existing
+  # connections with services when upgrading Cilium from < v1.5 to v1.5.
+  enable-legacy-services: "{{cilium_enable_legacy_services}}"
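
For context, the DNS poller above exists to back FQDN-based policy rules. A minimal sketch of such a policy (selector and names are illustrative; see [0] above for the actual syntax):

apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: allow-egress-to-example
spec:
  endpointSelector:
    matchLabels:
      app: frontend
  egress:
  - toFQDNs:
    - matchName: example.com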

roles/network_plugin/cilium/templates/cilium-cr.yml.j2 (Executable file → Normal file)
@@ -1,11 +1,58 @@
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
+metadata:
+  name: cilium-operator
+rules:
+- apiGroups:
+  - ""
+  resources:
+  # to get k8s version and status
+  - componentstatuses
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  # to automatically delete [core|kube]dns pods so that they start being
+  # managed by Cilium
+  - pods
+  verbs:
+  - get
+  - list
+  - watch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  # to automatically read from k8s and import the node's pod CIDR to cilium's
+  # etcd so all nodes know how to reach another pod running on a different
+  # node
+  - nodes
+  # to perform the translation of a CNP that contains `ToGroup` to its endpoints
+  - services
+  - endpoints
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumnetworkpolicies
+  - ciliumnetworkpolicies/status
+  - ciliumendpoints
+  - ciliumendpoints/status
+  verbs:
+  - '*'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
 metadata:
   name: cilium
 rules:
 - apiGroups:
-  - "networking.k8s.io"
+  - networking.k8s.io
   resources:
   - networkpolicies
   verbs:
@@ -34,11 +81,16 @@ rules:
   - list
   - watch
   - update
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  - nodes/status
+  verbs:
+  - patch
 - apiGroups:
   - extensions
   resources:
-  - networkpolicies  # FIXME remove this when we drop support for k8s NP-beta GH-1202
-  - thirdpartyresources
   - ingresses
   verbs:
   - create
@@ -46,7 +98,7 @@ rules:
   - list
   - watch
 - apiGroups:
-  - "apiextensions.k8s.io"
+  - apiextensions.k8s.io
   resources:
   - customresourcedefinitions
   verbs:
@@ -63,12 +115,4 @@ rules:
   - ciliumendpoints
   - ciliumendpoints/status
   verbs:
-  - "*"
-- apiGroups:
-  - policy
-  resourceNames:
-  - privileged
-  resources:
-  - podsecuritypolicies
-  verbs:
-  - use
+  - '*'

@@ -1,6 +1,19 @@
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
+metadata:
+  name: cilium-operator
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cilium-operator
+subjects:
+- kind: ServiceAccount
+  name: cilium-operator
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
 metadata:
   name: cilium
 roleRef:
@@ -8,8 +21,9 @@ roleRef:
   kind: ClusterRole
   name: cilium
 subjects:
 - kind: ServiceAccount
   name: cilium
   namespace: kube-system
-- kind: Group
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
   name: system:nodes

@@ -0,0 +1,122 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    io.cilium/app: operator
+    name: cilium-operator
+  name: cilium-operator
+  namespace: kube-system
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      io.cilium/app: operator
+      name: cilium-operator
+  strategy:
+    rollingUpdate:
+      maxSurge: 1
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        io.cilium/app: operator
+        name: cilium-operator
+    spec:
+      containers:
+      - args:
+        - --debug=$(CILIUM_DEBUG)
+        - --kvstore=etcd
+        - --kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config
+        command:
+        - cilium-operator
+        env:
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        - name: K8S_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: CILIUM_DEBUG
+          valueFrom:
+            configMapKeyRef:
+              key: debug
+              name: cilium-config
+              optional: true
+        - name: CILIUM_CLUSTER_NAME
+          valueFrom:
+            configMapKeyRef:
+              key: cluster-name
+              name: cilium-config
+              optional: true
+        - name: CILIUM_CLUSTER_ID
+          valueFrom:
+            configMapKeyRef:
+              key: cluster-id
+              name: cilium-config
+              optional: true
+        - name: CILIUM_DISABLE_ENDPOINT_CRD
+          valueFrom:
+            configMapKeyRef:
+              key: disable-endpoint-crd
+              name: cilium-config
+              optional: true
+        - name: AWS_ACCESS_KEY_ID
+          valueFrom:
+            secretKeyRef:
+              key: AWS_ACCESS_KEY_ID
+              name: cilium-aws
+              optional: true
+        - name: AWS_SECRET_ACCESS_KEY
+          valueFrom:
+            secretKeyRef:
+              key: AWS_SECRET_ACCESS_KEY
+              name: cilium-aws
+              optional: true
+        - name: AWS_DEFAULT_REGION
+          valueFrom:
+            secretKeyRef:
+              key: AWS_DEFAULT_REGION
+              name: cilium-aws
+              optional: true
+        image: docker.io/cilium/operator:v1.5.5
+        imagePullPolicy: IfNotPresent
+        name: cilium-operator
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 9234
+            scheme: HTTP
+          initialDelaySeconds: 60
+          periodSeconds: 10
+          timeoutSeconds: 3
+        volumeMounts:
+        - mountPath: /var/lib/etcd-config
+          name: etcd-config-path
+          readOnly: true
+        - mountPath: "{{cilium_cert_dir}}"
+          name: etcd-secrets
+          readOnly: true
+      dnsPolicy: ClusterFirst
+      priorityClassName: system-node-critical
+      restartPolicy: Always
+      serviceAccount: cilium-operator
+      serviceAccountName: cilium-operator
+      volumes:
+      # To read the etcd config stored in config maps
+      - configMap:
+          defaultMode: 420
+          items:
+          - key: etcd-config
+            path: etcd.config
+          name: cilium-config
+        name: etcd-config-path
+      # To read the k8s etcd secrets in case the user might want to use TLS
+      - name: etcd-secrets
+        hostPath:
+          path: "{{cilium_cert_dir}}"

@@ -1,219 +1,251 @@
+---
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
+  labels:
+    k8s-app: cilium
+    kubernetes.io/cluster-service: "true"
   name: cilium
   namespace: kube-system
 spec:
-  updateStrategy:
-    type: "RollingUpdate"
-    rollingUpdate:
-      # Specifies the maximum number of Pods that can be unavailable during the update process.
-      # The current default value is 1 or 100% for daemonsets; Adding an explicit value here
-      # to avoid confusion, as the default value is specific to the type (daemonset/deployment).
-      maxUnavailable: "100%"
   selector:
     matchLabels:
       k8s-app: cilium
       kubernetes.io/cluster-service: "true"
   template:
     metadata:
+      annotations:
+{% if cilium_enable_prometheus %}
+        prometheus.io/port: "9090"
+        prometheus.io/scrape: "true"
+{% endif %}
+        # This annotation plus the CriticalAddonsOnly toleration makes
+        # cilium to be a critical pod in the cluster, which ensures cilium
+        # gets priority scheduling.
+        # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
+        scheduler.alpha.kubernetes.io/critical-pod: ""
+        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]'
       labels:
         k8s-app: cilium
         kubernetes.io/cluster-service: "true"
-{% if cilium_enable_prometheus %}
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9090"
-{% endif %}
     spec:
-      priorityClassName: system-node-critical
-      serviceAccountName: cilium
-      initContainers:
-      - name: clean-cilium-state
-        image: {{ cilium_init_image_repo }}:{{ cilium_init_image_tag }}
-        imagePullPolicy: IfNotPresent
-        command: ['sh', '-c', 'if [ "${CLEAN_CILIUM_STATE}" = "true" ]; then rm -rf /var/run/cilium/state; rm -rf /sys/fs/bpf/tc/globals/cilium_*; fi']
-        volumeMounts:
-        - name: bpf-maps
-          mountPath: /sys/fs/bpf
-        - name: cilium-run
-          mountPath: /var/run/cilium
-        env:
-        - name: "CLEAN_CILIUM_STATE"
-          valueFrom:
-            configMapKeyRef:
-              name: cilium-config
-              optional: true
-              key: clean-cilium-state
       containers:
-      - image: {{ cilium_image_repo }}:{{ cilium_image_tag }}
-        imagePullPolicy: Always
-        name: cilium-agent
-        command: ["cilium-agent"]
-        args:
-        - "--debug=$(CILIUM_DEBUG)"
-        - "--kvstore=etcd"
-        - "--kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config"
-        - "--disable-ipv4=$(DISABLE_IPV4)"
-{% if cilium_enable_prometheus %}
-        ports:
-        - name: prometheus
-          containerPort: 9090
-{% endif %}
-        lifecycle:
-          postStart:
-            exec:
-              command:
-              - "/cni-install.sh"
-          preStop:
-            exec:
-              command:
-              - "/cni-uninstall.sh"
-        env:
-        - name: "K8S_NODE_NAME"
-          valueFrom:
-            fieldRef:
-              fieldPath: spec.nodeName
-        - name: "CILIUM_DEBUG"
-          valueFrom:
-            configMapKeyRef:
-              name: cilium-config
-              key: debug
-        - name: "DISABLE_IPV4"
-          valueFrom:
-            configMapKeyRef:
-              name: cilium-config
-              key: disable-ipv4
-{% if cilium_enable_prometheus %}
-        # Note: this variable is a no-op if not defined, and is used in the
-        # prometheus examples.
-        - name: "CILIUM_PROMETHEUS_SERVE_ADDR"
-          valueFrom:
-            configMapKeyRef:
-              name: cilium-metrics-config
-              optional: true
-              key: prometheus-serve-addr
-{% endif %}
-        - name: "CILIUM_LEGACY_HOST_ALLOWS_WORLD"
-          valueFrom:
-            configMapKeyRef:
-              name: cilium-config
-              optional: true
-              key: legacy-host-allows-world
-        - name: "CILIUM_SIDECAR_ISTIO_PROXY_IMAGE"
-          valueFrom:
-            configMapKeyRef:
-              name: cilium-config
-              key: sidecar-istio-proxy-image
-              optional: true
-        - name: "CILIUM_TUNNEL"
-          valueFrom:
-            configMapKeyRef:
-              key: tunnel
-              name: cilium-config
-              optional: true
-        - name: "CILIUM_MONITOR_AGGREGATION_LEVEL"
-          valueFrom:
-            configMapKeyRef:
-              key: monitor-aggregation-level
-              name: cilium-config
-              optional: true
-        resources:
-          limits:
-            cpu: {{ cilium_cpu_limit }}
-            memory: {{ cilium_memory_limit }}
-          requests:
-            cpu: {{ cilium_cpu_requests }}
-            memory: {{ cilium_memory_requests }}
-        livenessProbe:
-          exec:
-            command:
-            - cilium
-            - status
-          initialDelaySeconds: 15
-          failureThreshold: 10
-          periodSeconds: 10
-        readinessProbe:
-          exec:
-            command:
-            - cilium
-            - status
-          initialDelaySeconds: 5
-          periodSeconds: 5
-        volumeMounts:
-        - name: bpf-maps
-          mountPath: /sys/fs/bpf
-        - name: cilium-run
-          mountPath: /var/run/cilium
-        - name: cni-path
-          mountPath: /host/opt/cni/bin
-        - name: etc-cni-netd
-          mountPath: /host/etc/cni/net.d
+      - args:
+        - --kvstore=etcd
+        - --kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config
+        - --config-dir=/tmp/cilium/config-map
+        command:
+        - cilium-agent
+        env:
+        - name: K8S_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: CILIUM_K8S_NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        - name: CILIUM_CLUSTERMESH_CONFIG
+          value: /var/lib/cilium/clustermesh/
+        image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
+        imagePullPolicy: IfNotPresent
+        resources:
+          limits:
+            cpu: {{ cilium_cpu_limit }}
+            memory: {{ cilium_memory_limit }}
+          requests:
+            cpu: {{ cilium_cpu_requests }}
+            memory: {{ cilium_memory_requests }}
+        lifecycle:
+          postStart:
+            exec:
+              command:
+              - /cni-install.sh
+          preStop:
+            exec:
+              command:
+              - /cni-uninstall.sh
+        livenessProbe:
+          exec:
+            command:
+            - cilium
+            - status
+            - --brief
+          failureThreshold: 10
+          # The initial delay for the liveness probe is intentionally large to
+          # avoid an endless kill & restart cycle if in the event that the initial
+          # bootstrapping takes longer than expected.
+          initialDelaySeconds: 120
+          periodSeconds: 30
+          successThreshold: 1
+          timeoutSeconds: 5
+        name: cilium-agent
+{% if cilium_enable_prometheus %}
+        ports:
+        - containerPort: 9090
+          hostPort: 9090
+          name: prometheus
+          protocol: TCP
+{% endif %}
+        readinessProbe:
+          exec:
+            command:
+            - cilium
+            - status
+            - --brief
+          failureThreshold: 3
+          initialDelaySeconds: 5
+          periodSeconds: 30
+          successThreshold: 1
+          timeoutSeconds: 5
+        securityContext:
+          capabilities:
+            add:
+            - NET_ADMIN
+            - SYS_MODULE
+          privileged: true
+        volumeMounts:
+        - mountPath: /sys/fs/bpf
+          name: bpf-maps
+        - mountPath: /var/run/cilium
+          name: cilium-run
+        - mountPath: /host/opt/cni/bin
+          name: cni-path
+        - mountPath: /host/etc/cni/net.d
+          name: etc-cni-netd
 {% if container_manager == 'docker' %}
-        - name: docker-socket
-          mountPath: /var/run/docker.sock
+        - mountPath: /var/run/docker.sock
+          name: docker-socket
           readOnly: true
 {% else %}
         - name: "{{ container_manager }}-socket"
           mountPath: {{ cri_socket }}
           readOnly: true
 {% endif %}
-        - name: etcd-config-path
-          mountPath: /var/lib/etcd-config
+        - mountPath: /var/lib/etcd-config
+          name: etcd-config-path
           readOnly: true
-        - name: cilium-certs
-          mountPath: {{ cilium_cert_dir }}
+        - mountPath: "{{cilium_cert_dir}}"
+          name: etcd-secrets
           readOnly: true
-        securityContext:
-          capabilities:
-            add:
-            - "NET_ADMIN"
-          privileged: true
-      hostNetwork: true
+        - mountPath: /var/lib/cilium/clustermesh
+          name: clustermesh-secrets
+          readOnly: true
+        - mountPath: /tmp/cilium/config-map
+          name: cilium-config-path
+          readOnly: true
+        # Needed to be able to load kernel modules
+        - mountPath: /lib/modules
+          name: lib-modules
+          readOnly: true
       dnsPolicy: ClusterFirstWithHostNet
+      hostNetwork: true
+      hostPID: false
+      initContainers:
+      - command:
+        - /init-container.sh
+        env:
+        - name: CLEAN_CILIUM_STATE
+          valueFrom:
+            configMapKeyRef:
+              key: clean-cilium-state
+              name: cilium-config
+              optional: true
+        - name: CLEAN_CILIUM_BPF_STATE
+          valueFrom:
+            configMapKeyRef:
+              key: clean-cilium-bpf-state
+              name: cilium-config
+              optional: true
+        - name: CILIUM_WAIT_BPF_MOUNT
+          valueFrom:
+            configMapKeyRef:
+              key: wait-bpf-mount
+              name: cilium-config
+              optional: true
+        image: "{{cilium_init_image_repo}}:{{cilium_init_image_tag}}"
+        imagePullPolicy: IfNotPresent
+        name: clean-cilium-state
+        securityContext:
+          capabilities:
+            add:
+            - NET_ADMIN
+          privileged: true
+        volumeMounts:
+        - mountPath: /sys/fs/bpf
+          name: bpf-maps
+        - mountPath: /var/run/cilium
+          name: cilium-run
+      priorityClassName: system-node-critical
+      restartPolicy: Always
+      serviceAccount: cilium
+      serviceAccountName: cilium
+      terminationGracePeriodSeconds: 1
+      tolerations:
+      - operator: Exists
       volumes:
       # To keep state between restarts / upgrades
-      - name: cilium-run
-        hostPath:
-          path: /var/run/cilium
-      # To keep state between restarts / upgrades
-      - name: bpf-maps
-        hostPath:
-          path: /sys/fs/bpf
+      - hostPath:
+          path: /var/run/cilium
+          type: DirectoryOrCreate
+        name: cilium-run
+      # To keep state between restarts / upgrades for bpf maps
+      - hostPath:
+          path: /sys/fs/bpf
+          type: DirectoryOrCreate
+        name: bpf-maps
 {% if container_manager == 'docker' %}
       # To read docker events from the node
-      - name: docker-socket
-        hostPath:
-          path: /var/run/docker.sock
+      - hostPath:
+          path: /var/run/docker.sock
+          type: Socket
+        name: docker-socket
 {% else %}
       # To read crio events from the node
-      - name: {{ container_manager }}-socket
-        hostPath:
-          path: {{ cri_socket }}
+      - hostPath:
+          path: {{ cri_socket }}
+          type: Socket
+        name: {{ container_manager }}-socket
 {% endif %}
       # To install cilium cni plugin in the host
-      - name: cni-path
-        hostPath:
-          path: /opt/cni/bin
+      - hostPath:
+          path: /opt/cni/bin
+          type: DirectoryOrCreate
+        name: cni-path
       # To install cilium cni configuration in the host
-      - name: etc-cni-netd
-        hostPath:
-          path: /etc/cni/net.d
+      - hostPath:
+          path: /etc/cni/net.d
+          type: DirectoryOrCreate
+        name: etc-cni-netd
+      # To be able to load kernel modules
+      - hostPath:
+          path: /lib/modules
+        name: lib-modules
       # To read the etcd config stored in config maps
-      - name: etcd-config-path
-        configMap:
-          name: cilium-config
-          items:
-          - key: etcd-config
-            path: etcd.config
+      - configMap:
+          defaultMode: 420
+          items:
+          - key: etcd-config
+            path: etcd.config
+          name: cilium-config
+        name: etcd-config-path
       # To read the k8s etcd secrets in case the user might want to use TLS
-      - name: cilium-certs
+      - name: etcd-secrets
         hostPath:
-          path: {{ cilium_cert_dir }}
-      restartPolicy: Always
-      tolerations:
-      - operator: Exists
-      # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
-      - key: CriticalAddonsOnly
-        operator: "Exists"
+          path: "{{cilium_cert_dir}}"
+      # To read the clustermesh configuration
+      - name: clustermesh-secrets
+        secret:
+          defaultMode: 420
+          optional: true
+          secretName: cilium-clustermesh
+      # To read the configuration from the config map
+      - configMap:
+          name: cilium-config
+        name: cilium-config-path
+  updateStrategy:
+    rollingUpdate:
+      # Specifies the maximum number of Pods that can be unavailable during the update process.
+      maxUnavailable: 2
+    type: RollingUpdate
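
After the DaemonSet update rolls through (at most two nodes at a time, per maxUnavailable: 2 above), per-node health can be verified with `kubectl -n kube-system get pods -l k8s-app=cilium` and `kubectl -n kube-system exec <cilium-pod> -- cilium status` (the pod name is a placeholder).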

@@ -1,6 +1,12 @@
 ---
 apiVersion: v1
 kind: ServiceAccount
+metadata:
+  name: cilium-operator
+  namespace: kube-system
+---
+apiVersion: v1
+kind: ServiceAccount
 metadata:
   name: cilium
   namespace: kube-system