Changes to support Dual Stack networking

This commit is contained in:
Matt Calvert 2020-10-26 07:10:53 +00:00 committed by Kubernetes Prow Robot
parent ba731ed145
commit 4cc065e66d
12 changed files with 151 additions and 8 deletions

View file

@ -160,6 +160,7 @@ Note: The list of available docker version is 18.09, 19.03 and 20.10. The recomm
- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr are installed on the machine that will run Ansible commands; Ansible 2.10.x is not supported for now**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding**.
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding** (see the sysctl sketch after this list).
- The **firewalls are not managed**; you'll need to implement your own rules the way you used to.
  In order to avoid any issues during deployment, you should disable your firewall.
- If kubespray is run from a non-root user account, correct privilege escalation method
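As a minimal sketch of the forwarding requirements above, you could check and enable both sysctls manually on a target host as follows (the playbook in this commit also sets the IPv6 key when dual stack is enabled; the drop-in file name below is illustrative, not what kubespray uses):

```ShellSession
# Check current settings (1 means forwarding is enabled)
sysctl net.ipv4.ip_forward net.ipv6.conf.all.forwarding

# Enable both and persist them (file name is illustrative)
echo 'net.ipv4.ip_forward = 1' | sudo tee /etc/sysctl.d/99-forwarding.conf
echo 'net.ipv6.conf.all.forwarding = 1' | sudo tee -a /etc/sysctl.d/99-forwarding.conf
sudo sysctl --system
```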

View file

@ -58,13 +58,14 @@ To re-define you need to edit the inventory and add a group variable `calico_net
calico_network_backend: none
```
### Optional : Define the default pool CIDR
### Optional : Define the default pool CIDRs
By default, `kube_pods_subnet` is used as the IP range CIDR for the default IP Pool.
In some cases you may want to add several pools and not have them considered by Kubernetes as external (which means that they must be within or equal to the range defined in `kube_pods_subnet`), it starts with the default IP Pool of which IP range CIDR can by defined in group_vars (k8s-cluster/k8s-net-calico.yml):
By default, `kube_pods_subnet` is used as the IP range CIDR for the default IP Pool, and `kube_pods_subnet_ipv6` for IPv6.
In some cases you may want to add several pools and not have them considered by Kubernetes as external (which means that they must be within or equal to the ranges defined in `kube_pods_subnet` and `kube_pods_subnet_ipv6`). This starts with the default IP Pools, whose IP range CIDRs can be defined in group_vars (k8s-cluster/k8s-net-calico.yml):
```ShellSession
calico_pool_cidr: 10.233.64.0/20
calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
```
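Once the cluster is up, you can verify that both default pools were created with calicoctl (a sketch; the IPv6 pool is named after `calico_pool_name` with an `-ipv6` suffix, as set up by the tasks in this commit):

```ShellSession
# List the configured pools; with dual stack enabled you should see both
# the IPv4 pool and the "<calico_pool_name>-ipv6" pool.
calicoctl.sh get ippool -o wide
```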
### Optional : BGP Peering with border routers

View file

@ -62,6 +62,10 @@ following default cluster parameters:
raise an assertion in playbooks if the `kubelet_max_pods` var also isn't adjusted accordingly
(assertion not applicable to calico which doesn't use this as a hard limit, see
[Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes)).
* *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services.
* *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``.
* *kube_pods_subnet_ipv6* - Subnet for Pod IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1:0000/112``). Must not overlap with ``kube_service_addresses_ipv6``.
* *kube_network_node_prefix_ipv6* - Subnet allocated per-node for pod IPv6 IPs. The remaining bits in ``kube_pods_subnet_ipv6`` dictate how many kube nodes can be in the cluster.
* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
* *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
* *enable_coredns_k8s_external* - If enabled, it configures the [k8s_external plugin](https://coredns.io/plugins/k8s_external/)
@ -87,6 +91,10 @@ Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
private addresses, make sure to pick other values for ``kube_service_addresses``
and ``kube_pods_subnet``, for example from the ``172.18.0.0/16``.
## Enabling Dual Stack (IPV4 + IPV6) networking
If *enable_dual_stack_networks* is set to ``true``, Dual Stack networking will be enabled in the cluster. This will use the default IPv4 and IPv6 subnets specified in the defaults file in the ``kubespray-defaults`` role, unless overridden. The default config will give you room for up to 256 nodes with 126 pods per node, and up to 4096 services.
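A minimal group_vars override to switch this on might look like the following (a sketch using the default IPv6 subnets from ``kubespray-defaults``; the inventory path and the feature-gate line are assumptions, the latter only relevant if your Kubernetes version still gates dual stack behind ``IPv6DualStack``):

```yml
# inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml (path assumed)
enable_dual_stack_networks: true

# Optional overrides; these match the shipped defaults
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
kube_network_node_prefix_ipv6: 120

# Only if your Kubernetes version still treats dual stack as a gated feature
kube_feature_gates:
  - "IPv6DualStack=true"
```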
## DNS variables
By default, hosts are set up with 8.8.8.8 as an upstream DNS server and all

View file

@ -94,6 +94,25 @@ kube_pods_subnet: 10.233.64.0/18
# - kubelet_max_pods: 110
kube_network_node_prefix: 24
# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
enable_dual_stack_networks: false
# Kubernetes internal network for IPv6 services, unused block of space.
# This is only used if enable_dual_stack_networks is set to true
# This provides 4096 IPv6 IPs
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
# This network must not already be in your network infrastructure!
# This is only used if enable_dual_stack_networks is set to true.
# This provides room for 256 nodes with 254 pods per node.
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
# IPv6 subnet size allocated to each node for pods.
# This is only used if enable_dual_stack_networks is set to true
# This provides room for 254 pods per node.
kube_network_node_prefix_ipv6: 120
# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 6443 # (https)

View file

@ -20,6 +20,9 @@
# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
# calico_pool_cidr: 1.2.3.4/5
# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set.
# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
# Global as_num (/calico/bgp/v1/global/as_num)
# global_as_num: "64512"

View file

@ -88,8 +88,14 @@ dns:
imageTag: {{ coredns_image_tag }}
networking:
dnsDomain: {{ dns_domain }}
serviceSubnet: {{ kube_service_addresses }}
podSubnet: {{ kube_pods_subnet }}
serviceSubnet: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks }}"
podSubnet: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks }}"
{% if kube_feature_gates %}
featureGates:
{% for kube_feature_gate in kube_feature_gates %}
{{ kube_feature_gate.split("=")[0] }}: {{ kube_feature_gate.split("=")[1] }}
{% endfor %}
{% endif %}
kubernetesVersion: {{ kube_version }}
{% if kubeadm_config_api_fqdn is defined %}
controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
@ -127,6 +133,7 @@ apiServer:
etcd-servers-overrides: "/events#{{ etcd_events_access_addresses_semicolon }}"
{% endif %}
service-node-port-range: {{ kube_apiserver_node_port_range }}
service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks }}"
kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
profiling: "{{ kube_profiling }}"
request-timeout: "{{ kube_apiserver_request_timeout }}"
@ -262,7 +269,14 @@ controllerManager:
extraArgs:
node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
node-monitor-period: {{ kube_controller_node_monitor_period }}
cluster-cidr: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks }}"
service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks }}"
{% if enable_dual_stack_networks %}
node-cidr-mask-size-ipv4: "{{ kube_network_node_prefix }}"
node-cidr-mask-size-ipv6: "{{ kube_network_node_prefix_ipv6 }}"
{% else %}
node-cidr-mask-size: "{{ kube_network_node_prefix }}"
{% endif %}
profiling: "{{ kube_profiling }}"
terminated-pod-gc-threshold: "{{ kube_controller_terminated_pod_gc_threshold }}"
bind-address: {{ kube_controller_manager_bind_address }}
@ -349,7 +363,7 @@ clientConnection:
contentType: {{ kube_proxy_client_content_type }}
kubeconfig: {{ kube_proxy_client_kubeconfig }}
qps: {{ kube_proxy_client_qps }}
clusterCIDR: {{ kube_pods_subnet }}
clusterCIDR: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks }}"
configSyncPeriod: {{ kube_proxy_config_sync_period }}
conntrack:
maxPerCore: {{ kube_proxy_conntrack_max_per_core }}
@ -357,6 +371,12 @@ conntrack:
tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }}
tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }}
enableProfiling: {{ kube_proxy_enable_profiling }}
{% if kube_feature_gates %}
featureGates:
{% for kube_feature_gate in kube_feature_gates %}
{{ kube_feature_gate.split("=")[0] }}: {{ kube_feature_gate.split("=")[1] }}
{% endfor %}
{% endif %}
healthzBindAddress: {{ kube_proxy_healthz_bind_address }}
hostnameOverride: {{ kube_override_hostname }}
iptables:
@ -404,3 +424,9 @@ clusterDNS:
{% for dns_address in kubelet_cluster_dns %}
- {{ dns_address }}
{% endfor %}
{% if kube_feature_gates %}
featureGates:
{% for kube_feature_gate in kube_feature_gates %}
{{ kube_feature_gate.split("=")[0] }}: {{ kube_feature_gate.split("=")[1] }}
{% endfor %}
{% endif %}
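For reference, with ``enable_dual_stack_networks: true`` and no overrides, the networking stanza of this template renders roughly as follows (a sketch assuming the stock ``kube_service_addresses``/``kube_pods_subnet`` defaults and ``dns_domain: cluster.local``):

```yaml
networking:
  dnsDomain: cluster.local
  serviceSubnet: "10.233.0.0/18,fd85:ee78:d8a6:8607::1000/116"
  podSubnet: "10.233.64.0/18,fd85:ee78:d8a6:8607::1:0000/112"
```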

View file

@ -62,6 +62,15 @@
state: present
reload: yes
- name: Enable ipv6 forwarding
sysctl:
sysctl_file: "{{ sysctl_file_path }}"
name: net.ipv6.conf.all.forwarding
value: 1
state: present
reload: yes
when: enable_dual_stack_networks | bool
- name: Ensure kube-bench parameters are set
sysctl:
sysctl_file: /etc/sysctl.d/bridge-nf-call.conf

View file

@ -181,6 +181,25 @@ kube_pods_subnet: 10.233.64.0/18
# - kubelet_max_pods: 110
kube_network_node_prefix: 24
# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
enable_dual_stack_networks: false
# Kubernetes internal network for IPv6 services, unused block of space.
# This is only used if enable_dual_stack_networks is set to true
# This provides 4096 IPv6 IPs
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
# This network must not already be in your network infrastructure!
# This is only used if enable_dual_stack_networks is set to true.
# This provides room for 256 nodes with 254 pods per node.
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
# IPv6 subnet size allocated to each node for pods.
# This is only used if enable_dual_stack_networks is set to true
# This provides room for 254 pods per node.
kube_network_node_prefix_ipv6: 120
# The virtual cluster IP, real host IPs and ports the API Server will be
# listening on.
# NOTE: loadbalancer_apiserver_localhost somewhat alters the final API endpoint

View file

@ -12,6 +12,10 @@ ipip_mode: "{{ 'Always' if ipip else 'Never' }}" # change to "CrossSubnet" if y
calico_ipip_mode: "{{ ipip_mode }}"
calico_vxlan_mode: 'Never'
calico_ipip_mode_ipv6: Never
calico_vxlan_mode_ipv6: Never
calico_pool_blocksize_ipv6: 116
calico_cert_dir: /etc/calico/certs
# Global as_num (/calico/bgp/v1/global/as_num)

View file

@ -108,6 +108,31 @@
- 'calico_conf.stdout == "0"'
- calico_pool_cidr is defined
- name: Calico | Check if calico IPv6 network pool has already been configured
# noqa 306 - grep will exit 1 if no match found
shell: >
{{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}" | wc -l
args:
executable: /bin/bash
register: calico_conf_ipv6
retries: 4
until: calico_conf_ipv6.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
when:
- inventory_hostname == groups['kube-master'][0]
- enable_dual_stack_networks
- name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined
assert:
that: "[calico_pool_cidr_ipv6] | ipaddr(kube_pods_subnet_ipv6) | length == 1"
msg: "{{ calico_pool_cidr_ipv6 }} is not within or equal to {{ kube_pods_subnet_ipv6 }}"
when:
- inventory_hostname == groups['kube-master'][0]
- calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0"
- calico_pool_cidr_ipv6 is defined
- enable_dual_stack_networks
- name: Calico | Create calico manifests for kdd
template:
src: "{{ item.file }}.j2"
@ -156,6 +181,27 @@
- inventory_hostname == groups['kube-master'][0]
- 'calico_conf.stdout == "0"'
- name: Calico | Configure calico ipv6 network pool (version >= v3.3.0)
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: >
{ "kind": "IPPool",
"apiVersion": "projectcalico.org/v3",
"metadata": {
"name": "{{ calico_pool_name }}-ipv6",
},
"spec": {
"blockSize": {{ calico_pool_blocksize_ipv6 | default(kube_network_node_prefix_ipv6) }},
"cidr": "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}",
"ipipMode": "{{ calico_ipip_mode_ipv6 }}",
"vxlanMode": "{{ calico_vxlan_mode_ipv6 }}",
"natOutgoing": {{ nat_outgoing_ipv6|default(false) and not peer_with_router_ipv6|default(false) }} }}
when:
- inventory_hostname == groups['kube-master'][0]
- calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0"
- calico_version is version("v3.3.0", ">=")
- enable_dual_stack_networks | bool
- name: Populate Service External IPs
set_fact:
_service_external_ips: "{{ _service_external_ips|default([]) + [ {'cidr': item} ] }}"

View file

@ -200,9 +200,8 @@ spec:
{% endif %}
- name: CALICO_IPV4POOL_IPIP
value: "{{ calico_ipv4pool_ipip }}"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
value: "{{ enable_dual_stack_networks | default(false) }}"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "{{ calico_loglevel }}"
@ -239,6 +238,10 @@ spec:
- name: IP
value: "autodetect"
{% endif %}
{% if enable_dual_stack_networks %}
- name: IP6
value: autodetect
{% endif %}
{% if calico_use_default_route_src_ipaddr|default(false) %}
- name: FELIX_DEVICEROUTESOURCEADDRESS
valueFrom:

View file

@ -30,6 +30,10 @@
{% else %}
"ipam": {
"type": "calico-ipam",
{% if enable_dual_stack_networks %}
"assign_ipv6": "true",
"ipv6_pools": ["{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}"],
{% endif %}
"assign_ipv4": "true",
"ipv4_pools": ["{{ calico_pool_cidr | default(kube_pods_subnet) }}"]
},
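With dual stack enabled and no pool CIDR overrides, this ipam fragment renders roughly as follows (a sketch assuming the default ``kube_pods_subnet`` and ``kube_pods_subnet_ipv6`` values):

```json
"ipam": {
    "type": "calico-ipam",
    "assign_ipv6": "true",
    "ipv6_pools": ["fd85:ee78:d8a6:8607::1:0000/112"],
    "assign_ipv4": "true",
    "ipv4_pools": ["10.233.64.0/18"]
},
```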