Upgrade Cilium network plugin to v1.5.5. (#5014)
* Needs an additional cilium-operator deployment.
* Added option to enable hostPort mappings.
Parent: 7cf8ad4dc7
Commit: bc6de32faf
11 changed files with 566 additions and 213 deletions
@@ -117,7 +117,7 @@ Supported Components
- [cni-plugins](https://github.com/containernetworking/plugins) v0.8.1
- [calico](https://github.com/projectcalico/calico) v3.7.3
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.3.0
- [cilium](https://github.com/cilium/cilium) v1.5.5
- [contiv](https://github.com/contiv/install) v1.2.1
- [flanneld](https://github.com/coreos/flannel) v0.11.0
- [kube-router](https://github.com/cloudnativelabs/kube-router) v0.2.5

@@ -73,7 +73,7 @@ cni_version: "v0.8.1"
weave_version: 2.5.2
pod_infra_version: 3.1
contiv_version: 1.2.1
cilium_version: "v1.3.0"
cilium_version: "v1.5.5"
kube_ovn_version: "v0.6.0"
kube_router_version: "v0.2.5"
multus_version: "v3.1.autoconf"

@@ -237,8 +237,10 @@ contiv_ovs_image_repo: "docker.io/contiv/ovs"
contiv_ovs_image_tag: "latest"
cilium_image_repo: "docker.io/cilium/cilium"
cilium_image_tag: "{{ cilium_version }}"
cilium_init_image_repo: "docker.io/library/busybox"
cilium_init_image_tag: "1.28.4"
cilium_init_image_repo: "docker.io/cilium/cilium-init"
cilium_init_image_tag: "2019-04-05"
cilium_operator_image_repo: "docker.io/cilium/operator"
cilium_operator_image_tag: "{{ cilium_version }}"
kube_ovn_db_image_repo: "index.alauda.cn/alaudak8s/kube-ovn-db"
kube_ovn_node_image_repo: "index.alauda.cn/alaudak8s/kube-ovn-node"
kube_ovn_cni_image_repo: "index.alauda.cn/alaudak8s/kube-ovn-cni"

@@ -415,6 +417,15 @@ downloads:
groups:
- k8s-cluster

cilium_operator:
enabled: "{{ kube_network_plugin == 'cilium' }}"
container: true
repo: "{{ cilium_operator_image_repo }}"
tag: "{{ cilium_operator_image_tag }}"
sha256: "{{ cilium_operator_digest_checksum|default(None) }}"
groups:
- k8s-cluster

multus:
enabled: "{{ kube_network_plugin_multus }}"
container: true

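Note: the operator image coordinates come from the cilium_operator_image_repo/tag variables defined above, so they can be overridden like any other Kubespray image variable. A minimal sketch for pulling from a private mirror, assuming an illustrative registry host and group_vars file path:

# group_vars/k8s-cluster/k8s-net-cilium.yml (illustrative path)
# Pull the operator and agent images from a private mirror instead of Docker Hub.
cilium_operator_image_repo: "registry.example.local/cilium/operator"
cilium_operator_image_tag: "v1.5.5"
cilium_image_repo: "registry.example.local/cilium/cilium"
cilium_image_tag: "v1.5.5"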
@@ -1,7 +1,9 @@
---
# Log-level
cilium_debug: false
cilium_disable_ipv4: false

cilium_enable_ipv4: true
cilium_enable_ipv6: false

# Etcd SSL dirs
cilium_cert_dir: /etc/cilium/certs

@@ -9,9 +11,6 @@ kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem

# Cilium Network Policy directory
cilium_policy_dir: /etc/kubernetes/policy

# Limits for apps
cilium_memory_limit: 500M
cilium_cpu_limit: 500m

@@ -20,3 +19,12 @@ cilium_cpu_requests: 100m

# Optional features
cilium_enable_prometheus: false
# Enable if you want to make use of hostPort mappings
cilium_enable_portmap: false

# If upgrading from Cilium < 1.5, you may want to override some of these options
# to prevent service disruptions. See also:
# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
cilium_preallocate_bpf_maps: false
cilium_tofqdns_enable_poller: false
cilium_enable_legacy_services: false

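Note: these defaults are meant to be overridden per cluster. A minimal sketch of group_vars overrides for a cluster that wants hostPort support and is upgrading from Cilium < 1.5 (the variable names are the ones defined above; the file path is illustrative):

# inventory/mycluster/group_vars/k8s-cluster/k8s-net-cilium.yml (illustrative path)
cilium_enable_portmap: true          # chain the portmap CNI plugin so hostPort works
cilium_enable_legacy_services: true  # avoid breaking existing service connections during the < 1.5 upgrade
cilium_tofqdns_enable_poller: true   # keep DNS-polling behaviour until FQDN policies are migrated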
@@ -34,6 +34,7 @@
- {name: cilium, file: cilium-crb.yml, type: clusterrolebinding}
- {name: cilium, file: cilium-cr.yml, type: clusterrole}
- {name: cilium, file: cilium-ds.yml, type: ds}
- {name: cilium, file: cilium-deploy.yml, type: deploy}
- {name: cilium, file: cilium-sa.yml, type: sa}
register: cilium_node_manifests
when:

@@ -48,7 +49,16 @@
mode: 0755
register: cni_bin_dir

- name: Cilium | Create network policy directory
file:
path: "{{ cilium_policy_dir }}"
state: directory
- name: Cilium | Copy CNI plugins
unarchive:
src: "{{ local_release_dir }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
dest: "/opt/cni/bin"
mode: 0755
remote_src: yes
when: cilium_enable_portmap

- name: Cilium | Enable portmap addon
template:
src: 000-cilium-portmap.conflist.j2
dest: /etc/cni/net.d/000-cilium-portmap.conflist
when: cilium_enable_portmap

@@ -0,0 +1,13 @@
{
  "cniVersion": "0.3.1",
  "name": "cilium-portmap",
  "plugins": [
    {
      "type": "cilium-cni"
    },
    {
      "type": "portmap",
      "capabilities": { "portMappings": true }
    }
  ]
}

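Note: with the portmap plugin chained after cilium-cni, hostPort declarations in pod specs are honoured on each node. A minimal sketch of a pod relying on this mapping (name, image, and ports are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: hostport-demo          # illustrative name
spec:
  containers:
  - name: web
    image: nginx:1.17          # illustrative image
    ports:
    - containerPort: 80
      hostPort: 8080           # mapped onto the node by the chained portmap plugin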
roles/network_plugin/cilium/templates/cilium-config.yml.j2 (99 changed lines) Executable file → Normal file

@@ -25,18 +25,76 @@ data:
key-file: "{{ cilium_cert_dir }}/key.pem"
cert-file: "{{ cilium_cert_dir }}/cert.crt"

# If you want metrics enabled in all of your Cilium agents, set the port for
# which the Cilium agents will have their metrics exposed.
# This option deprecates the "prometheus-serve-addr" in the
# "cilium-metrics-config" ConfigMap
# NOTE that this will open the port on ALL nodes where Cilium pods are
# scheduled.
{% if cilium_enable_prometheus %}
prometheus-serve-addr: ":9090"
{% endif %}

# If you want to run cilium in debug mode change this value to true
debug: "{{ cilium_debug }}"
disable-ipv4: "{{ cilium_disable_ipv4 }}"
# If you want to clean cilium state; change this value to true
enable-ipv4: "{{ cilium_enable_ipv4 }}"
enable-ipv6: "{{ cilium_enable_ipv6 }}"
# If a serious issue occurs during Cilium startup, this
# invasive option may be set to true to remove all persistent
# state. Endpoints will not be restored using knowledge from a
# prior Cilium run, so they may receive new IP addresses upon
# restart. This also triggers clean-cilium-bpf-state.
clean-cilium-state: "false"
legacy-host-allows-world: "false"
# If you want to clean cilium BPF state, set this to true;
# Removes all BPF maps from the filesystem. Upon restart,
# endpoints are restored with the same IP addresses, however
# any ongoing connections may be disrupted briefly.
# Loadbalancing decisions will be reset, so any ongoing
# connections via a service may be loadbalanced to a different
# backend after restart.
clean-cilium-bpf-state: "false"

# Users who wish to specify their own custom CNI configuration file must set
# custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
custom-cni-conf: "false"

# If you want cilium monitor to aggregate tracing for packets, set this level
# to "low", "medium", or "maximum". The higher the level, the less packets
# that will be seen in monitor output.
monitor-aggregation-level: "none"

# ct-global-max-entries-* specifies the maximum number of connections
# supported across all endpoints, split by protocol: tcp or other. One pair
# of maps uses these values for IPv4 connections, and another pair of maps
# use these values for IPv6 connections.
#
# If these values are modified, then during the next Cilium startup the
# tracking of ongoing connections may be disrupted. This may lead to brief
# policy drops or a change in loadbalancing decisions for a connection.
#
# For users upgrading from Cilium 1.2 or earlier, to minimize disruption
# during the upgrade process, comment out these options.
bpf-ct-global-tcp-max: "524288"
bpf-ct-global-any-max: "262144"

# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
# default value below will minimize memory usage in the default installation;
# users who are sensitive to latency may consider setting this to "true".
#
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
# this option and behave as though it is set to "true".
#
# If this value is modified, then during the next Cilium startup the restore
# of existing endpoints and tracking of ongoing connections may be disrupted.
# This may lead to policy drops or a change in loadbalancing decisions for a
# connection for some time. Endpoints may need to be recreated to restore
# connectivity.
#
# If this option is set to "false" during an upgrade from 1.3 or earlier to
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
preallocate-bpf-maps: "{{cilium_preallocate_bpf_maps}}"

# Regular expression matching compatible Istio sidecar istio-proxy
# container image names
sidecar-istio-proxy-image: "cilium/istio_proxy"

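Note: the prometheus-serve-addr and preallocate-bpf-maps entries are driven by the role variables introduced in this change, so they can be toggled from the inventory. A minimal sketch (the file path is illustrative):

# group_vars/k8s-cluster/k8s-net-cilium.yml (illustrative path)
cilium_enable_prometheus: true      # renders prometheus-serve-addr: ":9090" into the ConfigMap
cilium_preallocate_bpf_maps: true   # trade up-front memory for lower per-packet latency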
@@ -47,3 +105,38 @@ data:
# - vxlan (default)
# - geneve
tunnel: "vxlan"

# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: default

# Unique ID of the cluster. Must be unique across all connected clusters and
# in the range of 1 and 255. Only relevant when building a mesh of clusters.
#cluster-id: 1

# DNS Polling periodically issues a DNS lookup for each `matchName` from
# cilium-agent. The result is used to regenerate endpoint policy.
# DNS lookups are repeated with an interval of 5 seconds, and are made for
# A(IPv4) and AAAA(IPv6) addresses. Should a lookup fail, the most recent IP
# data is used instead. An IP change will trigger a regeneration of the Cilium
# policy for each endpoint and increment the per cilium-agent policy
# repository revision.
#
# This option is disabled by default starting from version 1.4.x in favor
# of a more powerful DNS proxy-based implementation, see [0] for details.
# Enable this option if you want to use FQDN policies but do not want to use
# the DNS proxy.
#
# To ease upgrade, users may opt to set this option to "true".
# Otherwise please refer to the Upgrade Guide [1] which explains how to
# prepare policy rules for upgrade.
#
# [0] http://docs.cilium.io/en/stable/policy/language/#dns-based
# [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
tofqdns-enable-poller: "{{cilium_tofqdns_enable_poller}}"

# wait-bpf-mount makes init container wait until bpf filesystem is mounted
wait-bpf-mount: "false"

# Enable legacy services (prior v1.5) to prevent from terminating existing
# connections with services when upgrading Cilium from < v1.5 to v1.5.
enable-legacy-services: "{{cilium_enable_legacy_services}}"

roles/network_plugin/cilium/templates/cilium-cr.yml.j2 (70 changed lines) Executable file → Normal file

@@ -1,11 +1,58 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium-operator
rules:
- apiGroups:
- ""
resources:
# to get k8s version and status
- componentstatuses
verbs:
- get
- apiGroups:
- ""
resources:
# to automatically delete [core|kube]dns pods so that they start being
# managed by Cilium
- pods
verbs:
- get
- list
- watch
- delete
- apiGroups:
- ""
resources:
# to automatically read from k8s and import the node's pod CIDR to cilium's
# etcd so all nodes know how to reach another pod running in a different
# node.
- nodes
# to perform the translation of a CNP that contains `ToGroup` to its endpoints
- services
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumendpoints
- ciliumendpoints/status
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium
rules:
- apiGroups:
- "networking.k8s.io"
- networking.k8s.io
resources:
- networkpolicies
verbs:

@@ -34,11 +81,16 @@ rules:
- list
- watch
- update
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- extensions
resources:
- networkpolicies # FIXME remove this when we drop support for k8s NP-beta GH-1202
- thirdpartyresources
- ingresses
verbs:
- create

@@ -46,7 +98,7 @@ rules:
- list
- watch
- apiGroups:
- "apiextensions.k8s.io"
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:

@@ -63,12 +115,4 @@ rules:
- ciliumendpoints
- ciliumendpoints/status
verbs:
- "*"
- apiGroups:
- policy
resourceNames:
- privileged
resources:
- podsecuritypolicies
verbs:
- use
- '*'

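Note: the wildcard verbs on the cilium.io API group are what let the agent and operator manage CiliumNetworkPolicy and CiliumEndpoint objects. For orientation, a minimal sketch of a CiliumNetworkPolicy of the kind covered by these rules (names and labels are illustrative):

apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: allow-frontend-to-backend    # illustrative name
  namespace: default
spec:
  endpointSelector:
    matchLabels:
      app: backend                   # illustrative label
  ingress:
  - fromEndpoints:
    - matchLabels:
        app: frontend                # illustrative label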
@@ -1,6 +1,19 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium-operator
subjects:
- kind: ServiceAccount
name: cilium-operator
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium
roleRef:

@@ -11,5 +24,6 @@ subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system
- kind: Group
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes

roles/network_plugin/cilium/templates/cilium-deploy.yml.j2 (122 changed lines) Normal file

@@ -0,0 +1,122 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.cilium/app: operator
name: cilium-operator
name: cilium-operator
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
io.cilium/app: operator
name: cilium-operator
spec:
containers:
- args:
- --debug=$(CILIUM_DEBUG)
- --kvstore=etcd
- --kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config
command:
- cilium-operator
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
- name: CILIUM_CLUSTER_NAME
valueFrom:
configMapKeyRef:
key: cluster-name
name: cilium-config
optional: true
- name: CILIUM_CLUSTER_ID
valueFrom:
configMapKeyRef:
key: cluster-id
name: cilium-config
optional: true
- name: CILIUM_DISABLE_ENDPOINT_CRD
valueFrom:
configMapKeyRef:
key: disable-endpoint-crd
name: cilium-config
optional: true
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: cilium-aws
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: cilium-aws
optional: true
- name: AWS_DEFAULT_REGION
valueFrom:
secretKeyRef:
key: AWS_DEFAULT_REGION
name: cilium-aws
optional: true
image: docker.io/cilium/operator:v1.5.5
imagePullPolicy: IfNotPresent
name: cilium-operator
livenessProbe:
httpGet:
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 3
volumeMounts:
- mountPath: /var/lib/etcd-config
name: etcd-config-path
readOnly: true
- mountPath: "{{cilium_cert_dir}}"
name: etcd-secrets
readOnly: true
dnsPolicy: ClusterFirst
priorityClassName: system-node-critical
restartPolicy: Always
serviceAccount: cilium-operator
serviceAccountName: cilium-operator
volumes:
# To read the etcd config stored in config maps
- configMap:
defaultMode: 420
items:
- key: etcd-config
path: etcd.config
name: cilium-config
name: etcd-config-path
# To read the k8s etcd secrets in case the user might want to use TLS
- name: etcd-secrets
hostPath:
path: "{{cilium_cert_dir}}"

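Note: the AWS_* environment variables come from an optional cilium-aws Secret (all three references are marked optional: true), which is only needed if the operator should talk to AWS. A minimal sketch of that Secret, with placeholder values:

apiVersion: v1
kind: Secret
metadata:
  name: cilium-aws
  namespace: kube-system
type: Opaque
stringData:
  AWS_ACCESS_KEY_ID: "AKIA-example"          # placeholder, not a real key
  AWS_SECRET_ACCESS_KEY: "example-secret"    # placeholder
  AWS_DEFAULT_REGION: "eu-west-1"            # placeholder region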
@@ -1,123 +1,55 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
k8s-app: cilium
kubernetes.io/cluster-service: "true"
name: cilium
namespace: kube-system
spec:
updateStrategy:
type: "RollingUpdate"
rollingUpdate:
# Specifies the maximum number of Pods that can be unavailable during the update process.
# The current default value is 1 or 100% for daemonsets; Adding an explicit value here
# to avoid confusion, as the default value is specific to the type (daemonset/deployment).
maxUnavailable: "100%"
selector:
matchLabels:
k8s-app: cilium
kubernetes.io/cluster-service: "true"
template:
metadata:
annotations:
{% if cilium_enable_prometheus %}
prometheus.io/port: "9090"
prometheus.io/scrape: "true"
{% endif %}
# This annotation plus the CriticalAddonsOnly toleration makes
# cilium to be a critical pod in the cluster, which ensures cilium
# gets priority scheduling.
# https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
scheduler.alpha.kubernetes.io/critical-pod: ""
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]'
labels:
k8s-app: cilium
kubernetes.io/cluster-service: "true"
{% if cilium_enable_prometheus %}
prometheus.io/scrape: "true"
prometheus.io/port: "9090"
{% endif %}
spec:
priorityClassName: system-node-critical
serviceAccountName: cilium
initContainers:
- name: clean-cilium-state
image: {{ cilium_init_image_repo }}:{{ cilium_init_image_tag }}
imagePullPolicy: IfNotPresent
command: ['sh', '-c', 'if [ "${CLEAN_CILIUM_STATE}" = "true" ]; then rm -rf /var/run/cilium/state; rm -rf /sys/fs/bpf/tc/globals/cilium_*; fi']
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
- name: cilium-run
mountPath: /var/run/cilium
env:
- name: "CLEAN_CILIUM_STATE"
valueFrom:
configMapKeyRef:
name: cilium-config
optional: true
key: clean-cilium-state
containers:
- image: {{ cilium_image_repo }}:{{ cilium_image_tag }}
imagePullPolicy: Always
name: cilium-agent
command: ["cilium-agent"]
args:
- "--debug=$(CILIUM_DEBUG)"
- "--kvstore=etcd"
- "--kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config"
- "--disable-ipv4=$(DISABLE_IPV4)"
{% if cilium_enable_prometheus %}
ports:
- name: prometheus
containerPort: 9090
{% endif %}
lifecycle:
postStart:
exec:
- args:
- --kvstore=etcd
- --kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config
- --config-dir=/tmp/cilium/config-map
command:
- "/cni-install.sh"
preStop:
exec:
command:
- "/cni-uninstall.sh"
- cilium-agent
env:
- name: "K8S_NODE_NAME"
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: "CILIUM_DEBUG"
- name: CILIUM_K8S_NAMESPACE
valueFrom:
configMapKeyRef:
name: cilium-config
key: debug
- name: "DISABLE_IPV4"
valueFrom:
configMapKeyRef:
name: cilium-config
key: disable-ipv4
{% if cilium_enable_prometheus %}
# Note: this variable is a no-op if not defined, and is used in the
# prometheus examples.
- name: "CILIUM_PROMETHEUS_SERVE_ADDR"
valueFrom:
configMapKeyRef:
name: cilium-metrics-config
optional: true
key: prometheus-serve-addr
{% endif %}
- name: "CILIUM_LEGACY_HOST_ALLOWS_WORLD"
valueFrom:
configMapKeyRef:
name: cilium-config
optional: true
key: legacy-host-allows-world
- name: "CILIUM_SIDECAR_ISTIO_PROXY_IMAGE"
valueFrom:
configMapKeyRef:
name: cilium-config
key: sidecar-istio-proxy-image
optional: true
- name: "CILIUM_TUNNEL"
valueFrom:
configMapKeyRef:
key: tunnel
name: cilium-config
optional: true
- name: "CILIUM_MONITOR_AGGREGATION_LEVEL"
valueFrom:
configMapKeyRef:
key: monitor-aggregation-level
name: cilium-config
optional: true
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: {{ cilium_cpu_limit }}

@@ -125,95 +57,195 @@ spec:
requests:
cpu: {{ cilium_cpu_requests }}
memory: {{ cilium_memory_requests }}
lifecycle:
postStart:
exec:
command:
- /cni-install.sh
preStop:
exec:
command:
- /cni-uninstall.sh
livenessProbe:
exec:
command:
- cilium
- status
initialDelaySeconds: 15
- --brief
failureThreshold: 10
periodSeconds: 10
# The initial delay for the liveness probe is intentionally large to
# avoid an endless kill & restart cycle if in the event that the initial
# bootstrapping takes longer than expected.
initialDelaySeconds: 120
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
{% if cilium_enable_prometheus %}
ports:
- containerPort: 9090
hostPort: 9090
name: prometheus
protocol: TCP
{% endif %}
readinessProbe:
exec:
command:
- cilium
- status
- --brief
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 5
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
privileged: true
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
- name: cilium-run
mountPath: /var/run/cilium
- name: cni-path
mountPath: /host/opt/cni/bin
- name: etc-cni-netd
mountPath: /host/etc/cni/net.d
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
{% if container_manager == 'docker' %}
- name: docker-socket
mountPath: /var/run/docker.sock
- mountPath: /var/run/docker.sock
name: docker-socket
readOnly: true
{% else %}
- name: "{{ container_manager }}-socket"
mountPath: {{ cri_socket }}
readOnly: true
{% endif %}
- name: etcd-config-path
mountPath: /var/lib/etcd-config
- mountPath: /var/lib/etcd-config
name: etcd-config-path
readOnly: true
- name: cilium-certs
mountPath: {{ cilium_cert_dir }}
- mountPath: "{{cilium_cert_dir}}"
name: etcd-secrets
readOnly: true
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
- mountPath: /tmp/cilium/config-map
name: cilium-config-path
readOnly: true
# Needed to be able to load kernel modules
- mountPath: /lib/modules
name: lib-modules
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
hostPID: false
initContainers:
- command:
- /init-container.sh
env:
- name: CLEAN_CILIUM_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-state
name: cilium-config
optional: true
- name: CLEAN_CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
- name: CILIUM_WAIT_BPF_MOUNT
valueFrom:
configMapKeyRef:
key: wait-bpf-mount
name: cilium-config
optional: true
image: "{{cilium_init_image_repo}}:{{cilium_init_image_tag}}"
imagePullPolicy: IfNotPresent
name: clean-cilium-state
securityContext:
capabilities:
add:
- "NET_ADMIN"
- NET_ADMIN
privileged: true
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
priorityClassName: system-node-critical
restartPolicy: Always
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
tolerations:
- operator: Exists
volumes:
# To keep state between restarts / upgrades
- name: cilium-run
hostPath:
- hostPath:
path: /var/run/cilium
# To keep state between restarts / upgrades
- name: bpf-maps
hostPath:
type: DirectoryOrCreate
name: cilium-run
# To keep state between restarts / upgrades for bpf maps
- hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
{% if container_manager == 'docker' %}
# To read docker events from the node
- name: docker-socket
hostPath:
- hostPath:
path: /var/run/docker.sock
type: Socket
name: docker-socket
{% else %}
# To read crio events from the node
- name: {{ container_manager }}-socket
hostPath:
- hostPath:
path: {{ cri_socket }}
type: Socket
name: {{ container_manager }}-socket
{% endif %}
# To install cilium cni plugin in the host
- name: cni-path
hostPath:
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
# To install cilium cni configuration in the host
- name: etc-cni-netd
hostPath:
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
name: etc-cni-netd
# To be able to load kernel modules
- hostPath:
path: /lib/modules
name: lib-modules
# To read the etcd config stored in config maps
- name: etcd-config-path
configMap:
name: cilium-config
- configMap:
defaultMode: 420
items:
- key: etcd-config
path: etcd.config
name: cilium-config
name: etcd-config-path
# To read the k8s etcd secrets in case the user might want to use TLS
- name: cilium-certs
- name: etcd-secrets
hostPath:
path: {{ cilium_cert_dir }}

restartPolicy: Always
tolerations:
- operator: Exists
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
path: "{{cilium_cert_dir}}"
# To read the clustermesh configuration
- name: clustermesh-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-clustermesh
# To read the configuration from the config map
- configMap:
name: cilium-config
name: cilium-config-path
updateStrategy:
rollingUpdate:
# Specifies the maximum number of Pods that can be unavailable during the update process.
maxUnavailable: 2
type: RollingUpdate

@@ -1,6 +1,12 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium-operator
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium
namespace: kube-system