c12s-kubespray/roles/network_plugin/cilium/templates/cilium-config.yml.j2
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: cilium-config
  namespace: kube-system
data:
  # This etcd-config contains the etcd endpoints of your cluster. If you use
  # TLS please make sure you follow the tutorial in https://cilium.link/etcd-config
  etcd-config: |-
    ---
    endpoints:
{% for ip_addr in etcd_access_addresses.split(',') %}
      - {{ ip_addr }}
{% endfor %}
    # TLS for etcd: CA certificate used to verify the etcd server certificate.
    # See https://cilium.link/etcd-config
    ca-file: "{{ cilium_cert_dir }}/ca_cert.crt"
    # Client certificate and key used for client-to-server authentication
    # with etcd. See https://cilium.link/etcd-config
    key-file: "{{ cilium_cert_dir }}/key.pem"
    cert-file: "{{ cilium_cert_dir }}/cert.crt"
  # kvstore
  # https://docs.cilium.io/en/latest/cmdref/kvstore/
  kvstore: etcd
  kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}'
  # If you want metrics enabled in all of your Cilium agents, set the port
  # on which the Cilium agents will expose their metrics.
  # This option deprecates the "prometheus-serve-addr" in the
  # "cilium-metrics-config" ConfigMap.
  # NOTE that this will open the port on ALL nodes where Cilium pods are
  # scheduled.
{% if cilium_enable_prometheus %}
  prometheus-serve-addr: ":9090"
  operator-prometheus-serve-addr: ":6942"
  enable-metrics: "true"
{% endif %}
  # If you want to run cilium in debug mode, change this value to true
  debug: "{{ cilium_debug }}"
  enable-ipv4: "{{ cilium_enable_ipv4 }}"
  enable-ipv6: "{{ cilium_enable_ipv6 }}"
  # If a serious issue occurs during Cilium startup, this
  # invasive option may be set to true to remove all persistent
  # state. Endpoints will not be restored using knowledge from a
  # prior Cilium run, so they may receive new IP addresses upon
  # restart. This also triggers clean-cilium-bpf-state.
  clean-cilium-state: "false"
  # If you want to clean cilium BPF state, set this to true;
  # this removes all BPF maps from the filesystem. Upon restart,
  # endpoints are restored with the same IP addresses, however
  # any ongoing connections may be disrupted briefly.
  # Loadbalancing decisions will be reset, so any ongoing
  # connections via a service may be loadbalanced to a different
  # backend after restart.
  clean-cilium-bpf-state: "false"
  # Users who wish to specify their own custom CNI configuration file must set
  # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
  custom-cni-conf: "false"
  # If you want cilium monitor to aggregate tracing for packets, set this level
  # to "low", "medium", or "maximum". The higher the level, the fewer packets
  # will be seen in monitor output.
  monitor-aggregation: "{{ cilium_monitor_aggregation }}"
  # bpf-ct-global-*-max specifies the maximum number of connections
  # supported across all endpoints, split by protocol: tcp or other. One pair
  # of maps uses these values for IPv4 connections, and another pair of maps
  # uses these values for IPv6 connections.
  #
  # If these values are modified, then during the next Cilium startup the
  # tracking of ongoing connections may be disrupted. This may lead to brief
  # policy drops or a change in loadbalancing decisions for a connection.
  #
  # For users upgrading from Cilium 1.2 or earlier, to minimize disruption
  # during the upgrade process, comment out these options.
  bpf-ct-global-tcp-max: "524288"
  bpf-ct-global-any-max: "262144"
  # Pre-allocation of map entries allows per-packet latency to be reduced, at
  # the expense of up-front memory allocation for the entries in the maps. The
  # default value below will minimize memory usage in the default installation;
  # users who are sensitive to latency may consider setting this to "true".
  #
  # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
  # this option and behave as though it is set to "true".
  #
  # If this value is modified, then during the next Cilium startup the restore
  # of existing endpoints and tracking of ongoing connections may be disrupted.
  # This may lead to policy drops or a change in loadbalancing decisions for a
  # connection for some time. Endpoints may need to be recreated to restore
  # connectivity.
  #
  # If this option is set to "false" during an upgrade from 1.3 or earlier to
  # 1.4 or later, then it may cause one-time disruptions during the upgrade.
  preallocate-bpf-maps: "{{ cilium_preallocate_bpf_maps }}"
  # Regular expression matching compatible Istio sidecar istio-proxy
  # container image names
  sidecar-istio-proxy-image: "cilium/istio_proxy"
  # Encapsulation mode for communication between nodes
  # Possible values:
  # - disabled
  # - vxlan (default)
  # - geneve
  tunnel: "{{ cilium_tunnel_mode }}"
  # Name of the cluster. Only relevant when building a mesh of clusters.
  cluster-name: default
  # Unique ID of the cluster. Must be unique across all connected clusters and
  # in the range of 1 to 255. Only relevant when building a mesh of clusters.
  #cluster-id: 1
  # DNS Polling periodically issues a DNS lookup for each `matchName` from
  # cilium-agent. The result is used to regenerate endpoint policy.
  # DNS lookups are repeated with an interval of 5 seconds, and are made for
  # A(IPv4) and AAAA(IPv6) addresses. Should a lookup fail, the most recent IP
  # data is used instead. An IP change will trigger a regeneration of the Cilium
  # policy for each endpoint and increment the per cilium-agent policy
  # repository revision.
  #
  # This option is disabled by default starting from version 1.4.x in favor
  # of a more powerful DNS proxy-based implementation, see [0] for details.
  # Enable this option if you want to use FQDN policies but do not want to use
  # the DNS proxy.
  #
  # To ease upgrade, users may opt to set this option to "true".
  # Otherwise please refer to the Upgrade Guide [1] which explains how to
  # prepare policy rules for upgrade.
  #
  # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based
  # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
  tofqdns-enable-poller: "{{ cilium_tofqdns_enable_poller }}"
  # wait-bpf-mount makes the init container wait until the bpf filesystem is mounted
  wait-bpf-mount: "false"
  # Enable legacy services (prior to v1.5) to prevent terminating existing
  # connections with services when upgrading Cilium from < v1.5 to v1.5.
  enable-legacy-services: "{{ cilium_enable_legacy_services }}"
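  # How much of kube-proxy's functionality is replaced by Cilium's eBPF
  # datapath (e.g. "probe", "strict" or "disabled"); see the Cilium
  # kube-proxy replacement documentation for the full list of modes.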
  kube-proxy-replacement: "{{ cilium_kube_proxy_replacement }}"
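  # CIDR within which the underlying network can route pod IPs natively,
  # i.e. without tunneling or masquerading; only relevant when the tunnel
  # mode above is set to "disabled".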
  native-routing-cidr: "{{ cilium_native_routing_cidr }}"
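  # When using native routing, automatically install routes to the pod CIDRs
  # of other nodes that are reachable on the same L2 network.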
  auto-direct-node-routes: "{{ cilium_auto_direct_node_routes }}"
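  # Address and port on which cilium-operator serves its API
  # (health checks and the like).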
  operator-api-serve-addr: "{{ cilium_operator_api_serve_addr }}"
  # Hubble settings
{% if cilium_enable_hubble %}
  enable-hubble: "true"
{% if cilium_enable_hubble_metrics %}
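  # Address on which Hubble exposes its Prometheus metrics; the list below
  # selects which metric groups (e.g. dns, drop, tcp, flow) are enabled.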
  hubble-metrics-server: ":9091"
  hubble-metrics:
{% for hubble_metrics_cycle in cilium_hubble_metrics %}
    {{ hubble_metrics_cycle }}
{% endfor %}
{% endif %}
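  # Address on which the Hubble gRPC API is served; ":4244" listens on all
  # interfaces so that Hubble Relay can reach every agent.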
  hubble-listen-address: ":4244"
{% if cilium_enable_hubble and cilium_hubble_install %}
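  # Serve the Hubble API over mutual TLS using the server certificate, key
  # and client CA mounted below.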
  hubble-disable-tls: "false"
  hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
  hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
  hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
{% endif %}
{% endif %}
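  # Any additional key/value pairs defined in cilium_config_extra_vars are
  # rendered into the ConfigMap verbatim.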
{% for key, value in cilium_config_extra_vars.items() %}
  {{ key }}: "{{ value }}"
{% endfor %}
  # IPsec based transparent encryption between nodes
{% if cilium_ipsec_enabled %}
  enable-ipsec: "true"
  ipsec-key-file: /etc/ipsec/keys
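  # Set to "true" to also encrypt traffic between the hosts themselves,
  # in addition to pod-to-pod traffic.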
  encrypt-node: "false"
{% endif %}
  # IPAM settings
{% if cilium_version | regex_replace('v') is version('1.9', '>=') %}
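  # IPAM mode used by Cilium >= 1.9, e.g. "cluster-pool" (Cilium's default)
  # or "kubernetes" to allocate pod IPs from the node's podCIDR.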
  ipam: "{{ cilium_ipam_mode }}"
{% endif %}