c12s-kubespray/roles/network_plugin/calico/tasks/install.yml

---
- name: Calico | Copy calicoctl binary from download dir
  copy:
    src: "{{ local_release_dir }}/calicoctl"
    dest: "{{ bin_dir }}/calicoctl"
    mode: 0755
    remote_src: yes
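# If NetworkManager is present it must be told to ignore the cali* and tunl*
# interfaces, otherwise it may interfere with the routes and addresses that
# calico-node manages.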
- name: Calico | Check if host has NetworkManager # noqa 303
  command: systemctl show NetworkManager
  register: nm_check
  failed_when: false
  changed_when: false
- name: Calico | Ensure NetworkManager conf.d dir
  file:
    path: "/etc/NetworkManager/conf.d"
    state: directory
    recurse: yes
  when: nm_check.rc == 0
- name: Calico | Prevent NetworkManager from managing Calico interfaces
  copy:
    content: |
      [keyfile]
      unmanaged-devices=interface-name:cali*;interface-name:tunl*
    dest: /etc/NetworkManager/conf.d/calico.conf
  when: nm_check.rc == 0
  notify: Calico | Reload NetworkManager
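# For Calico >= v3.3.0 the CNI config is written as calico.conflist.template
# (the final conflist is then produced by calico-node's CNI install); older
# versions get the ready-to-use 10-calico.conflist directly.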
- name: Calico | Write Calico cni config
  template:
    src: "cni-calico.conflist.j2"
    dest: "/etc/cni/net.d/{% if calico_version is version('v3.3.0', '>=') %}calico.conflist.template{% else %}10-calico.conflist{% endif %}"
    owner: kube
  register: calico_conflist
  notify: reset_calico_cni
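# etcd datastore only: give calico-node its own copies of the etcd client
# certificates; hard links avoid duplicating key material on disk.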
- name: Calico | Create calico certs directory
  file:
    dest: "{{ calico_cert_dir }}"
    state: directory
    mode: 0750
    owner: root
    group: root
  when: calico_datastore == "etcd"
- name: Calico | Link etcd certificates for calico-node
  file:
    src: "{{ etcd_cert_dir }}/{{ item.s }}"
    dest: "{{ calico_cert_dir }}/{{ item.d }}"
    state: hard
    force: yes
  with_items:
    - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
    - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
    - {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
  when: calico_datastore == "etcd"
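# Typha TLS certs are generated once, on the first master, and only when
# typha_secure is enabled; calicoctl.sh is a small wrapper templated per
# datastore (etcd or kdd) so later tasks can call calicoctl uniformly.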
- name: Calico | Generate typha certs
  include_tasks: typha_certs.yml
  when:
    - typha_secure
    - inventory_hostname == groups['kube-master'][0]
- name: Calico | Install calicoctl wrapper script
  template:
    src: "calicoctl.{{ calico_datastore }}.sh.j2"
    dest: "{{ bin_dir }}/calicoctl.sh"
    mode: 0755
    owner: root
    group: root
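# With the etcd datastore, poll the first etcd endpoint's /health URL (using
# the linked client certificates) until it responds, before any Calico
# resources are created.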
- name: Calico | wait for etcd
  uri:
    url: "{{ etcd_access_addresses.split(',') | first }}/health"
    validate_certs: no
    client_cert: "{{ calico_cert_dir }}/cert.crt"
    client_key: "{{ calico_cert_dir }}/key.pem"
  register: result
  until: result.status == 200 or result.status == 401
  retries: 10
  delay: 5
  run_once: true
  when: calico_datastore == "etcd"
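# The pool is only created when calicoctl reports no existing IPPool matching
# the pod CIDR; a user-supplied calico_pool_cidr must additionally sit inside
# kube_pods_subnet.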
- name: Calico | Check if calico network pool has already been configured # noqa 306
  shell: >
    {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l
  register: calico_conf
  retries: 4
  until: calico_conf.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
  when:
    - inventory_hostname == groups['kube-master'][0]
- name: Calico | Ensure that calico_pool_cidr is within kube_pods_subnet when defined
  assert:
    that: "[calico_pool_cidr] | ipaddr(kube_pods_subnet) | length == 1"
    msg: "{{ calico_pool_cidr }} is not within or equal to {{ kube_pods_subnet }}"
  when:
    - inventory_hostname == groups['kube-master'][0]
    - 'calico_conf.stdout == "0"'
    - calico_pool_cidr is defined
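# Kubernetes datastore (kdd) only: template the Calico CRD manifest on every
# master and apply it from the first one.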
- name: Calico | Create calico manifests for kdd
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
  with_items:
    - {name: calico, file: kdd-crds.yml, type: kdd}
  register: calico_node_kdd_manifest
  when:
    - inventory_hostname in groups['kube-master']
    - calico_datastore == "kdd"
- name: Calico | Create Calico Kubernetes datastore resources
  kube:
    name: "{{ item.item.name }}"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
    state: "latest"
  with_items:
    - "{{ calico_node_kdd_manifest.results }}"
  when:
    - inventory_hostname == groups['kube-master'][0]
    - not item is skipped
  loop_control:
    label: "{{ item.item.file }}"
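# Create the IPPool via calicoctl; the only difference for Calico >= v3.3.0 is
# the extra blockSize field.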
- name: Calico | Configure calico network pool (version < v3.3.0) # noqa 306
  shell: >
    echo "
      { "kind": "IPPool",
        "apiVersion": "projectcalico.org/v3",
        "metadata": {
          "name": "{{ calico_pool_name }}",
        },
        "spec": {
          "cidr": "{{ calico_pool_cidr | default(kube_pods_subnet) }}",
          "ipipMode": "{{ calico_ipip_mode }}",
          "vxlanMode": "{{ calico_vxlan_mode }}",
          "natOutgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }} }} " | {{ bin_dir }}/calicoctl.sh apply -f -
  when:
    - inventory_hostname == groups['kube-master'][0]
    - 'calico_conf.stdout == "0"'
    - calico_version is version("v3.3.0", "<")
- name: Calico | Configure calico network pool (version >= v3.3.0) # noqa 306
  shell: >
    echo "
      { "kind": "IPPool",
        "apiVersion": "projectcalico.org/v3",
        "metadata": {
          "name": "{{ calico_pool_name }}",
        },
        "spec": {
          "blockSize": "{{ calico_pool_blocksize | default(kube_network_node_prefix) }}",
          "cidr": "{{ calico_pool_cidr | default(kube_pods_subnet) }}",
          "ipipMode": "{{ calico_ipip_mode }}",
          "vxlanMode": "{{ calico_vxlan_mode }}",
          "natOutgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }} }} " | {{ bin_dir }}/calicoctl.sh apply -f -
  when:
    - inventory_hostname == groups['kube-master'][0]
    - 'calico_conf.stdout == "0"'
    - calico_version is version("v3.3.0", ">=")
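# BGP setup: the node-to-node full mesh is switched off as soon as peering with
# external routers or route reflectors is requested; the global ASN and the
# global-scope BGPPeer resources are applied from the first master.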
- name: "Determine nodeToNodeMesh needed state"
  set_fact:
    nodeToNodeMeshEnabled: "false"
  when:
    - peer_with_router|default(false) or peer_with_calico_rr|default(false)
    - inventory_hostname in groups['k8s-cluster']
  run_once: yes
- name: Calico | Set global as_num # noqa 306
  shell: >
    echo '
      { "kind": "BGPConfiguration",
        "apiVersion": "projectcalico.org/v3",
        "metadata": {
          "name": "default",
        },
        "spec": {
          "logSeverityScreen": "Info",
          "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled|default('true') }} ,
          "asNumber": {{ global_as_num }} }} ' | {{ bin_dir }}/calicoctl.sh apply -f -
  changed_when: false
  when:
    - inventory_hostname == groups['kube-master'][0]
- name: Calico | Configure peering with router(s) at global scope # noqa 306
  shell: >
    echo '{
      "apiVersion": "projectcalico.org/v3",
      "kind": "BGPPeer",
      "metadata": {
        "name": "global-{{ item.router_id }}"
      },
      "spec": {
        "asNumber": "{{ item.as }}",
        "peerIP": "{{ item.router_id }}"
      }}' | {{ bin_dir }}/calicoctl.sh apply -f -
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ peers|selectattr('scope','defined')|selectattr('scope','equalto', 'global')|list|default([]) }}"
  when:
    - inventory_hostname == groups['kube-master'][0]
    - peer_with_router|default(false)
- name: Calico | Configure peering with route reflectors at global scope # noqa 306
  shell: |
    echo '{
      "apiVersion": "projectcalico.org/v3",
      "kind": "BGPPeer",
      "metadata": {
        "name": "peer-to-rrs"
      },
      "spec": {
        "nodeSelector": "!has(i-am-a-route-reflector)",
        "peerSelector": "has(i-am-a-route-reflector)"
      }}' | {{ bin_dir }}/calicoctl.sh apply -f -
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ groups['calico-rr'] | default([]) }}"
  when:
    - inventory_hostname == groups['kube-master'][0]
    - peer_with_calico_rr|default(false)
- name: Calico | Configure route reflectors to peer with each other # noqa 306
  shell: >
    echo '{
      "apiVersion": "projectcalico.org/v3",
      "kind": "BGPPeer",
      "metadata": {
        "name": "rr-mesh"
      },
      "spec": {
        "nodeSelector": "has(i-am-a-route-reflector)",
        "peerSelector": "has(i-am-a-route-reflector)"
      }}' | {{ bin_dir }}/calicoctl.sh apply -f -
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ groups['calico-rr'] | default([]) }}"
  when:
    - inventory_hostname == groups['kube-master'][0]
    - peer_with_calico_rr|default(false)
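# Template the calico-node manifests (ConfigMap, DaemonSet, RBAC objects and,
# optionally, Typha) on all masters and apply them from the first master only.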
- name: Calico | Create calico manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
  with_items:
    - {name: calico-config, file: calico-config.yml, type: cm}
    - {name: calico-node, file: calico-node.yml, type: ds}
    - {name: calico, file: calico-node-sa.yml, type: sa}
    - {name: calico, file: calico-cr.yml, type: clusterrole}
    - {name: calico, file: calico-crb.yml, type: clusterrolebinding}
  register: calico_node_manifests
  when:
    - inventory_hostname in groups['kube-master']
    - rbac_enabled or item.type not in rbac_resources
- name: Calico | Create calico manifests for typha
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
  with_items:
    - {name: calico, file: calico-typha.yml, type: typha}
  register: calico_node_typha_manifest
  when:
    - inventory_hostname in groups['kube-master']
    - typha_enabled and calico_datastore == "kdd"
- name: Start Calico resources
  kube:
    name: "{{ item.item.name }}"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
    state: "latest"
  with_items:
    - "{{ calico_node_manifests.results }}"
    - "{{ calico_node_kdd_manifest.results }}"
    - "{{ calico_node_typha_manifest.results }}"
  when:
    - inventory_hostname == groups['kube-master'][0]
    - not item is skipped
  loop_control:
    label: "{{ item.item.file }}"
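# In kdd mode, non-master nodes block here until the CNI kubeconfig appears in
# /etc/cni/net.d (it is created once calico-node's CNI install has run).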
- name: Wait for calico kubeconfig to be created
  wait_for:
    path: /etc/cni/net.d/calico-kubeconfig
  when:
    - inventory_hostname not in groups['kube-master']
    - calico_datastore == "kdd"
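# Per-node BGP settings: set this node's ASN when local_as is defined (and no
# dedicated route reflectors exist), then create node-scoped BGPPeer resources
# for peers whose scope is "node" or left unset.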
- name: Calico | Configure node asNumber for per node peering # noqa 306
  shell: >
    echo '{
      "apiVersion": "projectcalico.org/v3",
      "kind": "Node",
      "metadata": {
        "name": "{{ inventory_hostname }}"
      },
      "spec": {
        "bgp": {
          "asNumber": "{{ local_as }}"
        },
        "orchRefs":[{"nodeName":"{{ inventory_hostname }}","orchestrator":"k8s"}]
      }}' | {{ bin_dir }}/calicoctl.sh apply -f -
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  when:
    - peer_with_router|default(false)
    - inventory_hostname in groups['k8s-cluster']
    - local_as is defined
    - groups['calico-rr'] | default([]) | length == 0
- name: Calico | Configure peering with router(s) at node scope # noqa 306
  shell: >
    echo '{
      "apiVersion": "projectcalico.org/v3",
      "kind": "BGPPeer",
      "metadata": {
        "name": "{{ inventory_hostname }}-{{ item.router_id }}"
      },
      "spec": {
        "asNumber": "{{ item.as }}",
        "node": "{{ inventory_hostname }}",
        "peerIP": "{{ item.router_id }}"
      }}' | {{ bin_dir }}/calicoctl.sh apply -f -
  register: output
  retries: 4
  until: output.rc == 0
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ peers|selectattr('scope','undefined')|list|default([]) | union(peers|selectattr('scope','defined')|selectattr('scope','equalto', 'node')|list|default([])) }}"
  when:
    - peer_with_router|default(false)
    - inventory_hostname in groups['k8s-cluster']