---
apiVersion: v1
kind: ConfigMap
metadata:
  name: cilium-config
  namespace: kube-system
data:
  identity-allocation-mode: {{ cilium_identity_allocation_mode }}

{% if cilium_identity_allocation_mode == "kvstore" %}
  # This etcd-config contains the etcd endpoints of your cluster. If you use
  # TLS please make sure you follow the tutorial in https://cilium.link/etcd-config
  etcd-config: |-
    ---
    endpoints:
{% for ip_addr in etcd_access_addresses.split(',') %}
      - {{ ip_addr }}
{% endfor %}

    # In case you want to use TLS in etcd, uncomment the 'ca-file' line
    # and create a kubernetes secret by following the tutorial in
    # https://cilium.link/etcd-config
    ca-file: "{{ cilium_cert_dir }}/ca_cert.crt"

    # In case you want client to server authentication, uncomment the following
    # lines and create a kubernetes secret by following the tutorial in
    # https://cilium.link/etcd-config
    key-file: "{{ cilium_cert_dir }}/key.pem"
    cert-file: "{{ cilium_cert_dir }}/cert.crt"

  # kvstore
  # https://docs.cilium.io/en/latest/cmdref/kvstore/
  kvstore: etcd
  kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}'
{% endif %}

  # If you want metrics enabled in all of your Cilium agents, set the port for
  # which the Cilium agents will have their metrics exposed.
  # This option deprecates the "prometheus-serve-addr" in the
  # "cilium-metrics-config" ConfigMap
  # NOTE that this will open the port on ALL nodes where Cilium pods are
  # scheduled.
{% if cilium_enable_prometheus %}
  prometheus-serve-addr: ":{{ cilium_agent_scrape_port }}"
  operator-prometheus-serve-addr: ":{{ cilium_operator_scrape_port }}"
  enable-metrics: "true"
{% endif %}

  # If you want to run cilium in debug mode change this value to true
  debug: "{{ cilium_debug }}"
  enable-ipv4: "{{ cilium_enable_ipv4 }}"
  enable-ipv6: "{{ cilium_enable_ipv6 }}"

  # If a serious issue occurs during Cilium startup, this
  # invasive option may be set to true to remove all persistent
  # state. Endpoints will not be restored using knowledge from a
  # prior Cilium run, so they may receive new IP addresses upon
  # restart. This also triggers clean-cilium-bpf-state.
  clean-cilium-state: "false"

  # If you want to clean cilium BPF state, set this to true.
  # This removes all BPF maps from the filesystem. Upon restart,
  # endpoints are restored with the same IP addresses, however
  # any ongoing connections may be disrupted briefly.
  # Loadbalancing decisions will be reset, so any ongoing
  # connections via a service may be loadbalanced to a different
  # backend after restart.
  clean-cilium-bpf-state: "false"

  # Users who wish to specify their own custom CNI configuration file must set
  # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
  custom-cni-conf: "false"

  # If you want cilium monitor to aggregate tracing for packets, set this level
  # to "low", "medium", or "maximum". The higher the level, the fewer packets
  # will be seen in monitor output.
  monitor-aggregation: "{{ cilium_monitor_aggregation }}"

  # bpf-ct-global-*-max specifies the maximum number of connections
  # supported across all endpoints, split by protocol: tcp or other. One pair
  # of maps uses these values for IPv4 connections, and another pair of maps
  # uses these values for IPv6 connections.
  #
  # If these values are modified, then during the next Cilium startup the
  # tracking of ongoing connections may be disrupted. This may lead to brief
  # policy drops or a change in loadbalancing decisions for a connection.
  #
  # For users upgrading from Cilium 1.2 or earlier, to minimize disruption
  # during the upgrade process, comment out these options.
  bpf-ct-global-tcp-max: "524288"
  bpf-ct-global-any-max: "262144"

  # Pre-allocation of map entries allows per-packet latency to be reduced, at
  # the expense of up-front memory allocation for the entries in the maps. The
  # default value below will minimize memory usage in the default installation;
  # users who are sensitive to latency may consider setting this to "true".
  #
  # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
  # this option and behave as though it is set to "true".
  #
  # If this value is modified, then during the next Cilium startup the restore
  # of existing endpoints and tracking of ongoing connections may be disrupted.
  # This may lead to policy drops or a change in loadbalancing decisions for a
  # connection for some time. Endpoints may need to be recreated to restore
  # connectivity.
  #
  # If this option is set to "false" during an upgrade from 1.3 or earlier to
  # 1.4 or later, then it may cause one-time disruptions during the upgrade.
  preallocate-bpf-maps: "{{ cilium_preallocate_bpf_maps }}"

  # Regular expression matching compatible Istio sidecar istio-proxy
  # container image names
  sidecar-istio-proxy-image: "cilium/istio_proxy"

  # Encapsulation mode for communication between nodes
  # Possible values:
  #   - disabled
  #   - vxlan (default)
  #   - geneve
  tunnel: "{{ cilium_tunnel_mode }}"

  # Enable Bandwidth Manager
  # Cilium's bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.
  # Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies:
  # if they select the Pod at egress, bandwidth enforcement is disabled for those Pods.
  # Bandwidth Manager requires a v5.1.x or more recent Linux kernel.
{% if cilium_enable_bandwidth_manager %}
  enable-bandwidth-manager: "true"
{% endif %}

  # Name of the cluster. Only relevant when building a mesh of clusters.
  cluster-name: "{{ cilium_cluster_name }}"

  # Unique ID of the cluster. Must be unique across all connected clusters and
  # in the range of 1 to 255. Only relevant when building a mesh of clusters.
  #cluster-id: 1
{% if cilium_cluster_id is defined %}
  cluster-id: "{{ cilium_cluster_id }}"
{% endif %}

  # `wait-bpf-mount` is removed after v1.10.4
  # https://github.com/cilium/cilium/commit/d2217045cb3726a7f823174e086913b69b8090da
{% if cilium_version | regex_replace('v') is version('1.10.4', '<') %}
  # wait-bpf-mount makes the init container wait until the bpf filesystem is mounted
  wait-bpf-mount: "false"
{% endif %}
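
  # Note (editor's hint, not from upstream): for the Cilium releases this
  # template targets (roughly 1.10-1.12), cilium_kube_proxy_replacement is
  # normally one of "disabled", "probe", "partial" or "strict"; check the
  # Cilium documentation for the exact set supported by your version.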
  kube-proxy-replacement: "{{ cilium_kube_proxy_replacement }}"

  # `native-routing-cidr` is deprecated in 1.10 and removed in 1.12,
  # replaced by `ipv4-native-routing-cidr`.
  # https://github.com/cilium/cilium/pull/16695
{% if cilium_version | regex_replace('v') is version('1.12', '<') %}
  native-routing-cidr: "{{ cilium_native_routing_cidr }}"
{% else %}
{% if cilium_native_routing_cidr | length %}
  ipv4-native-routing-cidr: "{{ cilium_native_routing_cidr }}"
{% endif %}
{% if cilium_native_routing_cidr_ipv6 | length %}
  ipv6-native-routing-cidr: "{{ cilium_native_routing_cidr_ipv6 }}"
{% endif %}
{% endif %}

  auto-direct-node-routes: "{{ cilium_auto_direct_node_routes }}"

  operator-api-serve-addr: "{{ cilium_operator_api_serve_addr }}"

  # Hubble settings
{% if cilium_enable_hubble %}
  enable-hubble: "true"
{% if cilium_enable_hubble_metrics %}
  hubble-metrics-server: ":{{ cilium_hubble_scrape_port }}"
  hubble-metrics:
{% for hubble_metrics_cycle in cilium_hubble_metrics %}
    {{ hubble_metrics_cycle }}
{% endfor %}
{% endif %}
  hubble-listen-address: ":4244"
{% if cilium_enable_hubble and cilium_hubble_install %}
  hubble-disable-tls: "false"
  hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
  hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
  hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
{% endif %}
{% endif %}

  # IP Masquerade Agent
  enable-ip-masq-agent: "{{ cilium_ip_masq_agent_enable }}"

{% for key, value in cilium_config_extra_vars.items() %}
  {{ key }}: "{{ value }}"
{% endfor %}

  # Enable transparent network encryption
{% if cilium_encryption_enabled %}
{% if cilium_encryption_type == "ipsec" %}
  enable-ipsec: "true"
  ipsec-key-file: /etc/ipsec/keys
  encrypt-node: "{{ cilium_ipsec_node_encryption }}"
{% endif %}
{% if cilium_encryption_type == "wireguard" %}
  enable-wireguard: "true"
  enable-wireguard-userspace-fallback: "{{ cilium_wireguard_userspace_fallback }}"
{% endif %}
{% endif %}

  # IPAM settings
  ipam: "{{ cilium_ipam_mode }}"

  agent-health-port: "{{ cilium_agent_health_port }}"

{% if cilium_version | regex_replace('v') is version('1.11', '>=') and cilium_cgroup_host_root != '' %}
  cgroup-root: "{{ cilium_cgroup_host_root }}"
{% endif %}

  bpf-map-dynamic-size-ratio: "{{ cilium_bpf_map_dynamic_size_ratio }}"

  enable-ipv4-masquerade: "{{ cilium_enable_ipv4_masquerade }}"
  enable-ipv6-masquerade: "{{ cilium_enable_ipv6_masquerade }}"

  enable-bpf-masquerade: "{{ cilium_enable_bpf_masquerade }}"
  enable-host-legacy-routing: "{{ cilium_enable_host_legacy_routing }}"
  enable-remote-node-identity: "{{ cilium_enable_remote_node_identity }}"
  enable-well-known-identities: "{{ cilium_enable_well_known_identities }}"

  monitor-aggregation-flags: "{{ cilium_monitor_aggregation_flags }}"
  enable-bpf-clock-probe: "{{ cilium_enable_bpf_clock_probe }}"
  disable-cnp-status-updates: "{{ cilium_disable_cnp_status_updates }}"

{% if cilium_ip_masq_agent_enable %}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: ip-masq-agent
  namespace: kube-system
data:
  config: |
    nonMasqueradeCIDRs:
{% for cidr in cilium_non_masquerade_cidrs %}
      - {{ cidr }}
{% endfor %}
    masqLinkLocal: {{ cilium_masq_link_local | bool }}
    resyncInterval: "{{ cilium_ip_masq_resync_interval }}"
{% endif %}
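
# Illustrative rendering of the ip-masq-agent "config" key above, assuming
# example values cilium_non_masquerade_cidrs: ["10.0.0.0/8", "172.16.0.0/12"],
# cilium_masq_link_local: false and cilium_ip_masq_resync_interval: "60s"
# (example values only, not necessarily the role defaults):
#
#   config: |
#     nonMasqueradeCIDRs:
#       - 10.0.0.0/8
#       - 172.16.0.0/12
#     masqLinkLocal: false
#     resyncInterval: "60s"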