---
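# Tasks that deploy the Calico network plugin: install the calicoctl binary and
# wrapper script, write the CNI config, create the IP pool(s), and configure BGP
# (node-to-node mesh, route reflectors, external routers, service IP advertisement).
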
- name: Calico | Copy calicoctl binary from download dir
  copy:
    src: "{{ local_release_dir }}/calicoctl"
    dest: "{{ bin_dir }}/calicoctl"
    mode: 0755
    remote_src: yes

- name: Calico | Write Calico cni config
  template:
    src: "cni-calico.conflist.j2"
    dest: "/etc/cni/net.d/calico.conflist.template"
    owner: kube
  register: calico_conflist
  notify: reset_calico_cni

- name: Calico | Create calico certs directory
  file:
    dest: "{{ calico_cert_dir }}"
    state: directory
    mode: 0750
    owner: root
    group: root
  when: calico_datastore == "etcd"

- name: Calico | Link etcd certificates for calico-node
  file:
    src: "{{ etcd_cert_dir }}/{{ item.s }}"
    dest: "{{ calico_cert_dir }}/{{ item.d }}"
    state: hard
    mode: 0640
    force: yes
  with_items:
    - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
    - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
    - {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
  when: calico_datastore == "etcd"

- name: Calico | Generate typha certs
  include_tasks: typha_certs.yml
  when:
    - typha_secure
    - inventory_hostname == groups['kube_control_plane'][0]

- name: Calico | Install calicoctl wrapper script
  template:
    src: "calicoctl.{{ calico_datastore }}.sh.j2"
    dest: "{{ bin_dir }}/calicoctl.sh"
    mode: 0755
    owner: root
    group: root

- name: Calico | wait for etcd
  uri:
    url: "{{ etcd_access_addresses.split(',') | first }}/health"
    validate_certs: no
    client_cert: "{{ calico_cert_dir }}/cert.crt"
    client_key: "{{ calico_cert_dir }}/key.pem"
  register: result
  until: result.status == 200 or result.status == 401
  retries: 10
  delay: 5
  run_once: true
  when: calico_datastore == "etcd"

- name: Calico | Check if calico network pool has already been configured
  # noqa 306 - grep will exit 1 if no match found
  shell: >
    {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l
  args:
    executable: /bin/bash
  register: calico_conf
  retries: 4
  until: calico_conf.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
  when:
    - inventory_hostname == groups['kube_control_plane'][0]

- name: Calico | Ensure that calico_pool_cidr is within kube_pods_subnet when defined
  assert:
    that: "[calico_pool_cidr] | ipaddr(kube_pods_subnet) | length == 1"
    msg: "{{ calico_pool_cidr }} is not within or equal to {{ kube_pods_subnet }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - 'calico_conf.stdout == "0"'
    - calico_pool_cidr is defined

- name: Calico | Check if calico IPv6 network pool has already been configured
  # noqa 306 - grep will exit 1 if no match found
  shell: >
    {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}" | wc -l
  args:
    executable: /bin/bash
  register: calico_conf_ipv6
  retries: 4
  until: calico_conf_ipv6.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - enable_dual_stack_networks

- name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined
  assert:
    that: "[calico_pool_cidr_ipv6] | ipaddr(kube_pods_subnet_ipv6) | length == 1"
    msg: "{{ calico_pool_cidr_ipv6 }} is not within or equal to {{ kube_pods_subnet_ipv6 }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0"
    - calico_pool_cidr_ipv6 is defined
    - enable_dual_stack_networks

- block:
    - name: Calico | Remove unwanted annotations and creationTimestamp keys from metadata in Calico manifests
      # noqa 303 - sed avoids using nested loop
      shell: >-
        sed -E -i
        -e '/^\s{2,4}creationTimestamp: null$/d'
        -e '/^\s{2,4}annotations:/{:1;/\(devel\)$/!{N;b 1}; /.*/d}'
        {{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/*.yaml
      when:
        - calico_version is version('v3.17.0', '<')

    - name: Calico | Create calico manifests for kdd
      assemble:
        src: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds"
        dest: "{{ kube_config_dir }}/kdd-crds.yml"
        delimiter: "---\n"
        regexp: ".*\\.yaml"
        remote_src: true

    - name: Calico | Create Calico Kubernetes datastore resources
      kube:
        kubectl: "{{ bin_dir }}/kubectl"
        filename: "{{ kube_config_dir }}/kdd-crds.yml"
        state: "latest"
      when:
        - inventory_hostname == groups['kube_control_plane'][0]
  when:
    - inventory_hostname in groups['kube_control_plane']
    - calico_datastore == "kdd"

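# The IPPool below is only created when the check above found no existing pool
# (calico_conf.stdout == "0"). A sketch of the group_vars it consumes; the
# values are illustrative only:
#   calico_pool_cidr: 10.233.64.0/20       # must lie within kube_pods_subnet
#   calico_pool_blocksize: 24
#   calico_ipip_mode: Always               # Always, CrossSubnet or Never
#   calico_vxlan_mode: Never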
- name: Calico | Configure calico network pool
  command:
    cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
  vars:
    stdin: >
      { "kind": "IPPool",
        "apiVersion": "projectcalico.org/v3",
        "metadata": {
          "name": "{{ calico_pool_name }}",
        },
        "spec": {
          "blockSize": {{ calico_pool_blocksize | default(kube_network_node_prefix) }},
          "cidr": "{{ calico_pool_cidr | default(kube_pods_subnet) }}",
          "ipipMode": "{{ calico_ipip_mode }}",
          "vxlanMode": "{{ calico_vxlan_mode }}",
          "natOutgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }} }}
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - 'calico_conf.stdout == "0"'

- name: Calico | Configure calico ipv6 network pool (version >= v3.3.0)
  command:
    cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
    stdin: >
      { "kind": "IPPool",
        "apiVersion": "projectcalico.org/v3",
        "metadata": {
          "name": "{{ calico_pool_name }}-ipv6",
        },
        "spec": {
          "blockSize": {{ calico_pool_blocksize_ipv6 | default(kube_network_node_prefix_ipv6) }},
          "cidr": "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}",
          "ipipMode": "{{ calico_ipip_mode_ipv6 }}",
          "vxlanMode": "{{ calico_vxlan_mode_ipv6 }}",
          "natOutgoing": {{ nat_outgoing_ipv6|default(false) and not peer_with_router_ipv6|default(false) }} }}
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0"
    - enable_dual_stack_networks | bool

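# The two set_fact tasks below reshape the flat lists
# calico_advertise_service_external_ips / calico_advertise_service_loadbalancer_ips
# into the [{'cidr': ...}] form expected by the BGPConfiguration resource,
# e.g. ["192.0.2.0/24"] becomes [{"cidr": "192.0.2.0/24"}] (illustrative value).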
- name: Populate Service External IPs
  set_fact:
    _service_external_ips: "{{ _service_external_ips|default([]) + [ {'cidr': item} ] }}"
  with_items: "{{ calico_advertise_service_external_ips }}"
  run_once: yes

- name: Populate Service LoadBalancer IPs
  set_fact:
    _service_loadbalancer_ips: "{{ _service_loadbalancer_ips|default([]) + [ {'cidr': item} ] }}"
  with_items: "{{ calico_advertise_service_loadbalancer_ips }}"
  run_once: yes

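# The full node-to-node BGP mesh is disabled when peering with external routers
# or with Calico route reflectors, since route distribution is handled by them.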
- name: "Determine nodeToNodeMesh needed state"
|
|
set_fact:
|
|
nodeToNodeMeshEnabled: "false"
|
|
when:
|
|
- peer_with_router|default(false) or peer_with_calico_rr|default(false)
|
|
- inventory_hostname in groups['k8s_cluster']
|
|
run_once: yes
|
|
|
|
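# The BGPConfiguration below also controls which service IPs Calico advertises
# over BGP: cluster IPs (calico_advertise_cluster_ips), external IPs
# (calico_advertise_service_external_ips) and, with Calico >= v3.18, service
# LoadBalancer IPs (calico_advertise_service_loadbalancer_ips), e.g. to announce
# MetalLB-managed pools from Calico instead of the MetalLB speaker.
# A sketch of the related group_vars (addresses are illustrative):
#   calico_advertise_cluster_ips: true
#   calico_advertise_service_loadbalancer_ips:
#     - 192.0.2.0/24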
- name: Calico | Set up BGP Configuration
  command:
    cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
  vars:
    stdin: >
      { "kind": "BGPConfiguration",
        "apiVersion": "projectcalico.org/v3",
        "metadata": {
          "name": "default",
        },
        "spec": {
          "listenPort": {{ calico_bgp_listen_port }},
          "logSeverityScreen": "Info",
          {% if not calico_no_global_as_num|default(false) %}"asNumber": {{ global_as_num }},{% endif %}
          "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled|default('true') }},
          {% if calico_advertise_cluster_ips|default(false) %}"serviceClusterIPs": [{"cidr": "{{ kube_service_addresses }}" }],{% endif %}
          {% if calico_version is version('v3.18.0', '>=') and calico_advertise_service_loadbalancer_ips|length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %}
          "serviceExternalIPs": {{ _service_external_ips|default([]) }} }}
  changed_when: false
  when:
    - inventory_hostname == groups['kube_control_plane'][0]

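# A global-scope BGPPeer is created for every entry of `peers` that sets
# scope: global. Example entry (illustrative values):
#   peers:
#     - router_id: 192.0.2.1
#       as: 64512
#       scope: global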
- name: Calico | Configure peering with router(s) at global scope
  command:
    cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
  vars:
    stdin: >
      {"apiVersion": "projectcalico.org/v3",
       "kind": "BGPPeer",
       "metadata": {
         "name": "global-{{ item.name | default(item.router_id|replace(':','-')) }}"
       },
       "spec": {
         "asNumber": "{{ item.as }}",
         "peerIP": "{{ item.router_id }}"
       }}
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ peers|selectattr('scope','defined')|selectattr('scope','equalto', 'global')|list|default([]) }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - peer_with_router|default(false)

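# The next two BGPPeer resources wire up route reflectors: nodes without the
# i-am-a-route-reflector label peer with the reflectors, and the reflectors
# peer with each other.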
- name: Calico | Configure peering with route reflectors at global scope
  command:
    cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
    # revert when it's already a string
    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
  vars:
    stdin: >
      {"apiVersion": "projectcalico.org/v3",
       "kind": "BGPPeer",
       "metadata": {
         "name": "peer-to-rrs"
       },
       "spec": {
         "nodeSelector": "!has(i-am-a-route-reflector)",
         "peerSelector": "has(i-am-a-route-reflector)"
       }}
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ groups['calico_rr'] | default([]) }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - peer_with_calico_rr|default(false)

- name: Calico | Configure route reflectors to peer with each other
  command:
    cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
    # revert when it's already a string
    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
  vars:
    stdin: >
      {"apiVersion": "projectcalico.org/v3",
       "kind": "BGPPeer",
       "metadata": {
         "name": "rr-mesh"
       },
       "spec": {
         "nodeSelector": "has(i-am-a-route-reflector)",
         "peerSelector": "has(i-am-a-route-reflector)"
       }}
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ groups['calico_rr'] | default([]) }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - peer_with_calico_rr|default(false)

- name: Calico | Create calico manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
  with_items:
    - {name: calico-config, file: calico-config.yml, type: cm}
    - {name: calico-node, file: calico-node.yml, type: ds}
    - {name: calico, file: calico-node-sa.yml, type: sa}
    - {name: calico, file: calico-cr.yml, type: clusterrole}
    - {name: calico, file: calico-crb.yml, type: clusterrolebinding}
  register: calico_node_manifests
  when:
    - inventory_hostname in groups['kube_control_plane']
    - rbac_enabled or item.type not in rbac_resources

- name: Calico | Create calico manifests for typha
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
  with_items:
    - {name: calico, file: calico-typha.yml, type: typha}
  register: calico_node_typha_manifest
  when:
    - inventory_hostname in groups['kube_control_plane']
    - typha_enabled and calico_datastore == "kdd"

- name: Start Calico resources
  kube:
    name: "{{ item.item.name }}"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
    state: "latest"
  with_items:
    - "{{ calico_node_manifests.results }}"
    - "{{ calico_node_typha_manifest.results }}"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - not item is skipped
  loop_control:
    label: "{{ item.item.file }}"

- name: Wait for calico kubeconfig to be created
  wait_for:
    path: /etc/cni/net.d/calico-kubeconfig
  when:
    - inventory_hostname not in groups['kube_control_plane']
    - calico_datastore == "kdd"

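# With per-node peering and no route reflectors, each node's Calico Node
# resource gets its own AS number from the host var local_as,
# e.g. local_as: 64513 (illustrative value).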
- name: Calico | Configure node asNumber for per node peering
  command:
    cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
  vars:
    stdin: >
      {"apiVersion": "projectcalico.org/v3",
       "kind": "Node",
       "metadata": {
         "name": "{{ inventory_hostname }}"
       },
       "spec": {
         "bgp": {
           "asNumber": "{{ local_as }}"
         },
         "orchRefs": [{"nodeName": "{{ inventory_hostname }}", "orchestrator": "k8s"}]
       }}
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  when:
    - peer_with_router|default(false)
    - inventory_hostname in groups['k8s_cluster']
    - local_as is defined
    - groups['calico_rr'] | default([]) | length == 0

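# Node-scope BGPPeer resources are created for `peers` entries that either set
# scope: node or define no scope at all; each peer is pinned to the current
# node via the "node" field.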
- name: Calico | Configure peering with router(s) at node scope
  command:
    cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
  vars:
    stdin: >
      {"apiVersion": "projectcalico.org/v3",
       "kind": "BGPPeer",
       "metadata": {
         "name": "{{ inventory_hostname }}-{{ item.name | default(item.router_id|replace(':','-')) }}"
       },
       "spec": {
         "asNumber": "{{ item.as }}",
         "node": "{{ inventory_hostname }}",
         "peerIP": "{{ item.router_id }}"
       }}
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ peers|selectattr('scope','undefined')|list|default([]) | union(peers|selectattr('scope','defined')|selectattr('scope','equalto', 'node')|list|default([])) }}"
  when:
    - peer_with_router|default(false)
    - inventory_hostname in groups['k8s_cluster']