calico upgrade to v3 (#3086)
* calico upgrade to v3
* update calico_rr version
* add missing file
* change contents of main.yml as it was left old version
* enable network policy by default
* remove unneeded task
* Fix kubelet calico settings
* fix when statement
* switch back to node-kubeconfig.yaml
This commit is contained in: parent f453567cce, commit 23fd3461bc
27 changed files with 399 additions and 252 deletions
@@ -95,7 +95,7 @@ Supported Components
 - [docker](https://www.docker.com/) v17.03 (see note)
 - [rkt](https://github.com/rkt/rkt) v1.21.0 (see Note 2)
 - Network Plugin
-  - [calico](https://github.com/projectcalico/calico) v2.6.8
+  - [calico](https://github.com/projectcalico/calico) v3.1.3
   - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
   - [cilium](https://github.com/cilium/cilium) v1.2.0
   - [contiv](https://github.com/contiv/install) v1.1.7
@@ -1,6 +1,13 @@
 Calico
 ===========

+---
+**N.B. Upgrading from version 2.6.5 to 3.1.1 migrates the etcd store to etcdv3.**
+If you create automated backups of etcdv2, switch to creating etcdv3 backups, since Kubernetes and Calico now use etcdv3.
+After the migration you can check the `/tmp/calico_upgrade/` directory for the items converted to etcdv3.
+**PLEASE TEST the upgrade before upgrading a production cluster.**
+---
+
 Check if the calico-node container is running

 ```
@@ -86,7 +93,7 @@ To do so you can deploy BGP route reflectors and peer `calico-node` with them as
 recommended here:

 * https://hub.docker.com/r/calico/routereflector/
-* http://docs.projectcalico.org/v2.0/reference/private-cloud/l3-interconnect-fabric
+* https://docs.projectcalico.org/v3.1/reference/private-cloud/l3-interconnect-fabric

 You need to edit your inventory and add:

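As a rough illustration of the inventory change described above, here is a hypothetical YAML inventory fragment; the host names, addresses and the cluster_id value are placeholders, only the `calico-rr` group, the `ip`/`cluster_id` host vars and `peer_with_calico_rr` come from this change.

```
# Hypothetical inventory sketch (YAML format); names, IPs and cluster_id are placeholders.
all:
  children:
    calico-rr:
      hosts:
        rr0:
          ip: 10.210.1.10
          cluster_id: "1.0.0.1"
        rr1:
          ip: 10.210.1.11
          cluster_id: "1.0.0.1"
    k8s-cluster:
      vars:
        peer_with_calico_rr: true
```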
@@ -83,9 +83,6 @@ kube_network_plugin: calico
 # weave_no_masq_local: true
 # weave_extra_args: ~

-# Enable kubernetes network policies
-enable_network_policy: false
-
 # Kubernetes internal network for services, unused block of space.
 kube_service_addresses: 10.233.0.0/18
@@ -32,11 +32,11 @@ kubeadm_version: "{{ kube_version }}"
 etcd_version: v3.2.18
 # TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
 # after migration to container download
-calico_version: "v2.6.8"
-calico_ctl_version: "v1.6.3"
-calico_cni_version: "v1.11.4"
-calico_policy_version: "v1.0.3"
-calico_rr_version: "v0.4.2"
+calico_version: "v3.1.3"
+calico_ctl_version: "v3.1.3"
+calico_cni_version: "v3.1.3"
+calico_policy_version: "v3.1.3"
+calico_rr_version: "v0.6.1"
 flannel_version: "v0.10.0"
 flannel_cni_version: "v0.3.0"
 vault_version: 0.10.1
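These role defaults can also be pinned from inventory instead of editing the role; a minimal group_vars sketch, where the values shown are simply the new defaults introduced above:

```
# group_vars sketch: pinning the Calico component versions from inventory.
# Values mirror the new role defaults; override only if you need a different release.
calico_version: "v3.1.3"
calico_ctl_version: "v3.1.3"
calico_cni_version: "v3.1.3"
calico_policy_version: "v3.1.3"
calico_rr_version: "v0.6.1"
```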
@@ -7,5 +7,13 @@
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
     state: "latest"
-  with_items: "{{ calico_node_manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and not item|skipped
+  with_items:
+    - "{{ calico_node_manifests.results }}"
+  when:
+    - inventory_hostname == groups['kube-master'][0] and not item|skipped
+
+- name: "calico upgrade complete"
+  shell: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+    - calico_upgrade_needed|default(False)
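The `complete` step added above is the tail end of a three-step calico-upgrade flow (dry-run, start, complete). A hedged sketch of the first two steps as stand-alone tasks, using the same binary path, flags and output directories that the calico role's upgrade tasks use later in this change; the host targeting is an assumption for illustration:

```
# Illustrative only: the calico role below drives these same commands.
- hosts: kube-master[0]
  tasks:
    - name: Dry-run the etcdv2 -> etcdv3 data migration
      shell: >-
        {{ bin_dir }}/calico-upgrade dry-run
        --output-dir=/tmp
        --apiconfigv1 /etc/calico/etcdv2.yml
        --apiconfigv3 /etc/calico/etcdv3.yml

    - name: Start the real migration once the dry-run reports success
      shell: >-
        {{ bin_dir }}/calico-upgrade start --no-prompts
        --apiconfigv1 /etc/calico/etcdv2.yml
        --apiconfigv3 /etc/calico/etcdv3.yml
        --output-dir=/tmp/calico_upgrade
```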
@@ -2,20 +2,12 @@
 - name: Set cert dir
   set_fact:
     calico_cert_dir: "{{ canal_cert_dir }}"
-  when: kube_network_plugin == 'canal'
+  when:
+    - kube_network_plugin == 'canal'
   tags:
     - facts
     - canal

-- name: Delete the old calico-policy-controller if it exist
-  kube:
-    name: calico-policy-controller
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: rs
-    namespace: "kube-system"
-    state: absent
-  run_once: true
-
 - name: Create calico-kube-controllers manifests
   template:
     src: "{{item.file}}.j2"
@@ -26,7 +18,9 @@
     - {name: calico-kube-controllers, file: calico-kube-cr.yml, type: clusterrole}
     - {name: calico-kube-controllers, file: calico-kube-crb.yml, type: clusterrolebinding}
   register: calico_kube_manifests
-  when: inventory_hostname == groups['kube-master'][0] and not item|skipped
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+    - rbac_enabled or item.type not in rbac_resources

 - name: Start of Calico kube controllers
   kube:
@@ -36,5 +30,8 @@
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
     state: "latest"
-  with_items: "{{ calico_kube_manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and not item|skipped
+  with_items:
+    - "{{ calico_kube_manifests.results }}"
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+    - not item|skipped
@@ -6,8 +6,12 @@ metadata:
   labels:
     k8s-app: calico-kube-controllers
     kubernetes.io/cluster-service: "true"
+  annotations:
+    scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
   replicas: 1
+  strategy:
+    type: Recreate
   selector:
     matchLabels:
       kubernetes.io/cluster-service: "true"
@@ -45,17 +49,6 @@ spec:
               value: "{{ calico_cert_dir }}/cert.crt"
             - name: ETCD_KEY_FILE
              value: "{{ calico_cert_dir }}/key.pem"
-            # Location of the Kubernetes API - this shouldn't need to be
-            # changed so long as it is used in conjunction with
-            # CONFIGURE_ETC_HOSTS="true".
-            - name: K8S_API
-              value: "https://kubernetes.default"
-            # Configure /etc/hosts within the container to resolve
-            # the kubernetes.default Service to the correct clusterIP
-            # using the environment provided by the kubelet.
-            # This removes the need for KubeDNS to resolve the Service.
-            - name: CONFIGURE_ETC_HOSTS
-              value: "true"
           volumeMounts:
             - mountPath: {{ calico_cert_dir }}
              name: etcd-certs
@@ -12,6 +12,14 @@ rules:
       - pods
       - namespaces
       - networkpolicies
+      - nodes
     verbs:
       - watch
       - list
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - networkpolicies
+    verbs:
+      - watch
+      - list
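The new rule lets the controller watch policies from the standard networking.k8s.io API group. For reference, a minimal example of such a policy; the namespace and name are placeholders, not part of this change:

```
# Minimal NetworkPolicy of the kind the controller can now watch.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
  namespace: demo
spec:
  podSelector: {}        # selects every pod in the namespace
  policyTypes:
    - Ingress            # no ingress rules listed, so all ingress is denied
```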
@@ -88,6 +88,15 @@
     - is_kube_master
     - kubeadm_discovery_address != kube_apiserver_endpoint

+# FIXME(mattymo): Reconcile kubelet kubeconfig filename for both deploy modes
+- name: Symlink kubelet kubeconfig for calico/canal
+  file:
+    src: "{{ kube_config_dir }}/kubelet.conf"
+    dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
+    state: link
+    force: yes
+  when: kube_network_plugin in ['calico','canal']
+
 - name: Restart all kube-proxy pods to ensure that they load the new configmap
   shell: "{{ bin_dir }}/kubectl delete pod -n kube-system -l k8s-app=kube-proxy"
   delegate_to: "{{groups['kube-master']|first}}"
@@ -22,6 +22,7 @@
     -v {{ docker_daemon_graph }}:{{ docker_daemon_graph }}:rw \
     -v /var/log:/var/log:rw \
     -v /var/lib/kubelet:/var/lib/kubelet:shared \
+    -v /var/lib/calico:/var/lib/calico:shared \
     -v /var/lib/cni:/var/lib/cni:shared \
     -v /var/run:/var/run:rw \
 {# we can run into issues with double mounting /var/lib/kubelet #}
@@ -37,6 +37,10 @@ ExecStart=/usr/bin/rkt run \
   --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
   --volume opt-cni,kind=host,source=/opt/cni,readOnly=true \
   --volume var-lib-cni,kind=host,source=/var/lib/cni,readOnly=false \
+{% endif %}
+{% if kube_network_plugin in ["calico", "canal"] %}
+  --volume var-lib-calico,kind=host,source=/var/lib/calico,readOnly=false \
+{% endif %}
 {# we can run into issues with double mounting /var/lib/kubelet #}
 {# surely there's a better way to do this #}
 {% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
@@ -55,6 +59,9 @@ ExecStart=/usr/bin/rkt run \
   --mount volume=etc-cni,target=/etc/cni \
   --mount volume=opt-cni,target=/opt/cni \
   --mount volume=var-lib-cni,target=/var/lib/cni \
+{% if kube_network_plugin in ["calico", "canal"] %}
+  --mount volume=var-lib-calico,target=/var/lib/calico \
+{% endif %}
 {% endif %}
   --mount volume=os-release,target=/etc/os-release \
   --mount volume=dns,target=/etc/resolv.conf \
@@ -89,6 +89,7 @@
   with_items:
     - "/etc/cni/net.d"
     - "/opt/cni/bin"
+    - "/var/lib/calico"
   when:
     - kube_network_plugin in ["calico", "weave", "canal", "flannel", "contiv", "cilium"]
     - inventory_hostname in groups['k8s-cluster']
@@ -98,7 +98,7 @@
 {%- elif dns_mode == 'coredns_dual' and not dns_early|bool -%}
   {{ [ skydns_server ] + [ skydns_server_secondary ] + upstream_dns_servers|default([]) }}
 {%- elif dns_mode == 'manual' and not dns_early|bool -%}
-  {{ [ manual_dns_server ] + upstream_dns_servers|default([]) }}
+  {{ ( manual_dns_server.split(',') | list) + upstream_dns_servers|default([]) }}
 {%- elif dns_early|bool -%}
   {{ upstream_dns_servers|default([]) }}
 {%- else -%}
@@ -187,7 +187,7 @@ dashboard_enabled: true
 efk_enabled: false
 helm_enabled: false
 registry_enabled: false
-enable_network_policy: false
+enable_network_policy: true
 local_volume_provisioner_enabled: "{{ local_volumes_enabled | default('false') }}"
 persistent_volumes_enabled: false
 cephfs_provisioner_enabled: false
@@ -2,9 +2,12 @@
 # Enables Internet connectivity from containers
 nat_outgoing: true

+#add default ippool name
+calico_pool_name: "default-pool"
+
 # Use IP-over-IP encapsulation across hosts
 ipip: true
-ipip_mode: always # change to "cross-subnet" if you only want ipip encapsulation on traffic going across subnets
+ipip_mode: Always # change to "CrossSubnet" if you only want ipip encapsulation on traffic going across subnets

 # Set to true if you want your calico cni binaries to overwrite the
 # ones from hyperkube while leaving other cni plugins intact.
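A short group_vars sketch for the two new knobs above; the non-default values are illustrative only:

```
# Illustrative overrides for the new Calico defaults.
calico_pool_name: "my-pool"     # example name; the role default is "default-pool"
ipip_mode: CrossSubnet          # encapsulate only traffic that crosses subnets
```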
@@ -37,6 +40,9 @@ calico_felix_prometheusmetricsport: 9091
 calico_felix_prometheusgometricsenabled: "true"
 calico_felix_prometheusprocessmetricsenabled: "true"

+### check latest version https://github.com/projectcalico/calico-upgrade/releases
+calico_upgrade_version: v1.0.5
+
 # Should calico ignore kernel's RPF check setting,
 # see https://github.com/projectcalico/felix/blob/ab8799eaea66627e5db7717e62fca61fd9c08646/python/calico/felix/config.py#L198
 calico_node_ignorelooserpf: false
@@ -48,3 +54,6 @@ calico_node_ignorelooserpf: false
 # calico_ip_auto_method: "interface=eth.*"

 calico_baremetal_nodename: "{{ inventory_hostname }}"
+
+### do not enable this, this is detected in scope of tasks, this is just a default value
+calico_upgrade_needed: false
roles/network_plugin/calico/tasks/check.yml (new file, 37 lines)
---
- name: "Check vars defined correctly"
  assert:
    that:
      - "calico_pool_name is defined"
      - "calico_pool_name | match('^[a-zA-Z0-9-_\\\\.]{2,63}$')"
      - "ipip_mode is defined"
      - "ipip_mode in ['Always', 'CrossSubnet', 'Never']"
    msg: "Check variable definitions, something seems to be wrong"
  run_once: yes

- name: "Get current calico cluster version"
  shell: "{{ bin_dir }}/calicoctl version | grep 'Cluster Version' | awk '{ print $3}'"
  register: calico_version_on_server
  run_once: yes
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: "Determine if calico upgrade is needed"
  block:
    - name: "Check that calico version is new enough for upgrade"
      assert:
        that:
          - calico_version_on_server.stdout|version_compare('v2.6.5', '>=')
        msg: "Your version of calico is not fresh enough for upgrade"

    - name: "Set upgrade flag when version needs to be updated"
      set_fact:
        calico_upgrade_needed: True
      when:
        - calico_version_on_server.stdout|version_compare('v2.6.5', '>=')
        - calico_version_on_server.stdout|version_compare('v3.0.0', '<')
  when:
    - 'calico_version_on_server.stdout is defined'
    - 'calico_version_on_server.stdout != ""'
    - inventory_hostname == groups['kube-master'][0]
  run_once: yes
roles/network_plugin/calico/tasks/install.yml (new file, 189 lines)
---

- name: Calico | Write Calico cni config
  template:
    src: "cni-calico.conflist.j2"
    dest: "/etc/cni/net.d/10-calico.conflist"
    owner: kube

- name: Calico | Create calico certs directory
  file:
    dest: "{{ calico_cert_dir }}"
    state: directory
    mode: 0750
    owner: root
    group: root

- name: Calico | Link etcd certificates for calico-node
  file:
    src: "{{ etcd_cert_dir }}/{{ item.s }}"
    dest: "{{ calico_cert_dir }}/{{ item.d }}"
    state: hard
    force: yes
  with_items:
    - {s: "ca.pem", d: "ca_cert.crt"}
    - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
    - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}

- name: Calico | Install calicoctl container script
  template:
    src: calicoctl-container.j2
    dest: "{{ bin_dir }}/calicoctl"
    mode: 0755
    owner: root
    group: root
  changed_when: false

- name: Calico | Copy cni plugins from hyperkube
  command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
  register: cni_task_result
  until: cni_task_result.rc == 0
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
  tags:
    - hyperkube
    - upgrade

- name: Calico | Copy cni plugins from calico/cni container
  command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} sh -c 'cp /opt/cni/bin/* /cnibindir/'"
  register: cni_task_result
  until: cni_task_result.rc == 0
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
  when:
    - "overwrite_hyperkube_cni|bool"
  tags:
    - hyperkube
    - upgrade

- name: Calico | Set cni directory permissions
  file:
    path: /opt/cni/bin
    state: directory
    owner: kube
    recurse: true
    mode: 0755

- name: Calico | wait for etcd
  uri:
    url: "{{ etcd_access_addresses.split(',') | first }}/health"
    validate_certs: no
    client_cert: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
    client_key: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
  register: result
  until: result.status == 200 or result.status == 401
  retries: 10
  delay: 5
  run_once: true

- name: Calico | Check if calico network pool has already been configured
  shell: >
    {{ bin_dir }}/calicoctl get ippool | grep -w "{{ kube_pods_subnet }}" | wc -l
  register: calico_conf
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  delegate_to: "{{ groups['kube-master'][0] }}"
  run_once: true

- name: Calico | Configure calico network pool
  shell: >
    echo "
    { "kind": "IPPool",
      "apiVersion": "projectcalico.org/v3",
      "metadata": {
        "name": "{{ calico_pool_name }}",
      },
      "spec": {
        "cidr": "{{ kube_pods_subnet }}",
        "ipipMode": "{{ ipip_mode|capitalize }}",
        "natOutgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }} }} " | {{ bin_dir }}/calicoctl create -f -
  run_once: true
  delegate_to: "{{ groups['kube-master'][0] }}"
  when:
    - 'calico_conf.stdout == "0"'

- name: "Determine nodeToNodeMesh needed state"
  set_fact:
    nodeToNodeMeshEnabled: "false"
  when:
    - peer_with_router|default(false) or peer_with_calico_rr|default(false)
    - inventory_hostname in groups['k8s-cluster']
  run_once: yes

- name: Calico | Set global as_num
  shell: >
    echo '
    { "kind": "BGPConfiguration",
      "apiVersion": "projectcalico.org/v3",
      "metadata": {
        "name": "default",
      },
      "spec": {
        "logSeverityScreen": "Info",
        "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled|default('true') }} ,
        "asNumber": {{ global_as_num }} }} ' | {{ bin_dir }}/calicoctl --skip-exists create -f -
  run_once: true
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Calico | Configure peering with router(s)
  shell: >
    echo '{
    "apiVersion": "projectcalico.org/v3",
    "kind": "bgpPeer",
    "metadata": {
      "name": "{{ inventory_hostname }}-bgp"
    },
    "spec": {
      "asNumber": "{{ item.as }}",
      "node": "{{ inventory_hostname }}",
      "scope": "node",
      "peerIP": "{{ item.router_id }}"
    }}' | {{ bin_dir }}/calicoctl create --skip-exists -f -
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ peers|default([]) }}"
  when:
    - peer_with_router|default(false)
    - inventory_hostname in groups['k8s-cluster']

- name: Calico | Configure peering with route reflectors
  shell: >
    echo '{
    "apiVersion": "projectcalico.org/v3",
    "kind": "bgpPeer",
    "metadata": {
      "name": "{{ inventory_hostname }}"
    },
    "spec": {
      "asNumber": "{{ local_as | default(global_as_num)}}",
      "scope": "node",
      "node": "{{ inventory_hostname }}",
      "peerIP": "{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(hostvars[item]["ansible_default_ipv4"]["address"]) }}"
    }}' | {{ bin_dir }}/calicoctl create --skip-exists -f -
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ groups['calico-rr'] | default([]) }}"
  when:
    - peer_with_calico_rr|default(false)
    - inventory_hostname in groups['k8s-cluster']
    - hostvars[item]['cluster_id'] == cluster_id

- name: Calico | Create calico manifests
  template:
    src: "{{item.file}}.j2"
    dest: "{{kube_config_dir}}/{{item.file}}"
  with_items:
    - {name: calico-config, file: calico-config.yml, type: cm}
    - {name: calico-node, file: calico-node.yml, type: ds}
    - {name: calico, file: calico-node-sa.yml, type: sa}
    - {name: calico, file: calico-cr.yml, type: clusterrole}
    - {name: calico, file: calico-crb.yml, type: clusterrolebinding}
  register: calico_node_manifests
  when:
    - inventory_hostname in groups['kube-master']
    - rbac_enabled or item.type not in rbac_resources
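For readers more used to calicoctl YAML than the inline JSON above, this is roughly the v3 IPPool that the "Configure calico network pool" task creates; the CIDR is only an example value, the real one is filled in from kube_pods_subnet:

```
# Approximate YAML form of the IPPool created above; the CIDR is a placeholder.
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: default-pool
spec:
  cidr: 10.233.64.0/18
  ipipMode: Always        # or CrossSubnet / Never, matching ipip_mode
  natOutgoing: true
```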
@@ -1,193 +1,12 @@
 ---
-- name: Calico | Disable calico-node service if it exists
-  service:
-    name: calico-node
-    state: stopped
-    enabled: yes
-  failed_when: false
-
-- name: Calico | Get kubelet hostname
-  shell: >-
-    {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
-    | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1
-  register: calico_kubelet_name
-  delegate_to: "{{ groups['kube-master'][0] }}"
-  when: cloud_provider is defined
-
-- name: Calico | Write Calico cni config
-  template:
-    src: "cni-calico.conflist.j2"
-    dest: "/etc/cni/net.d/10-calico.conflist"
-    owner: kube
-
-- name: Calico | Create calico certs directory
-  file:
-    dest: "{{ calico_cert_dir }}"
-    state: directory
-    mode: 0750
-    owner: root
-    group: root
-
-- name: Calico | Link etcd certificates for calico-node
-  file:
-    src: "{{ etcd_cert_dir }}/{{ item.s }}"
-    dest: "{{ calico_cert_dir }}/{{ item.d }}"
-    state: hard
-    force: yes
-  with_items:
-    - {s: "ca.pem", d: "ca_cert.crt"}
-    - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
-    - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
-
-- name: Calico | Install calicoctl container script
-  template:
-    src: calicoctl-container.j2
-    dest: "{{ bin_dir }}/calicoctl"
-    mode: 0755
-    owner: root
-    group: root
-  changed_when: false
-
-- name: Calico | Copy cni plugins from hyperkube
-  command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -rf /opt/cni/bin/. /cnibindir/"
-  register: cni_task_result
-  until: cni_task_result.rc == 0
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  changed_when: false
-  tags:
-    - hyperkube
-    - upgrade
-
-- name: Calico | Copy cni plugins from calico/cni container
-  command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} sh -c 'cp /opt/cni/bin/* /cnibindir/'"
-  register: cni_task_result
-  until: cni_task_result.rc == 0
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  changed_when: false
-  when: overwrite_hyperkube_cni|bool
-  tags:
-    - hyperkube
-    - upgrade
-
-- name: Calico | Set cni directory permissions
-  file:
-    path: /opt/cni/bin
-    state: directory
-    owner: kube
-    recurse: true
-    mode: 0755
-
-- name: Calico | wait for etcd
-  uri:
-    url: "{{ etcd_access_addresses.split(',') | first }}/health"
-    validate_certs: no
-    client_cert: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    client_key: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
-  register: result
-  until: result.status == 200 or result.status == 401
-  retries: 10
-  delay: 5
-  run_once: true
-
-- name: Calico | Check if calico network pool has already been configured
-  command: |-
-    curl \
-      --cacert {{ etcd_cert_dir }}/ca.pem \
-      --cert {{ etcd_cert_dir}}/node-{{ inventory_hostname }}.pem \
-      --key {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
-      {{ etcd_access_addresses.split(',') | first }}/v2/keys/calico/v1/ipam/v4/pool
-  register: calico_conf
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  run_once: true
-  changed_when: false
-
-- name: Calico | Configure calico network pool
-  shell: >
-    echo '{
-    "kind": "ipPool",
-    "spec": {"disabled": false, "ipip": {"enabled": {{ ipip }}, "mode": "{{ ipip_mode }}"},
-    "nat-outgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }}},
-    "apiVersion": "v1",
-    "metadata": {"cidr": "{{ kube_pods_subnet }}"}
-    }'
-    | {{ bin_dir }}/calicoctl apply -f -
-  environment:
-    NO_DEFAULT_POOLS: true
-  run_once: true
-  when: ("Key not found" in calico_conf.stdout or "nodes" not in calico_conf.stdout)
-
-- name: Calico | Get calico configuration from etcd
-  command: |-
-    curl \
-      --cacert {{ etcd_cert_dir }}/ca.pem \
-      --cert {{ etcd_cert_dir}}/node-{{ inventory_hostname }}.pem \
-      --key {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
-      {{ etcd_access_addresses.split(',') | first }}/v2/keys/calico/v1/ipam/v4/pool
-  register: calico_pools_raw
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  run_once: true
-
-- set_fact:
-    calico_pools: "{{ calico_pools_raw.stdout | from_json }}"
-  run_once: true
-
-- name: Calico | Set global as_num
-  command: "{{ bin_dir}}/calicoctl config set asNumber {{ global_as_num }}"
-  run_once: true
-
-- name: Calico | Disable node mesh
-  shell: "{{ bin_dir }}/calicoctl config set nodeToNodeMesh off"
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  when: ((peer_with_router|default(false) or peer_with_calico_rr|default(false))
-         and inventory_hostname in groups['k8s-cluster'])
-  run_once: true
-
-- name: Calico | Configure peering with router(s)
-  shell: >
-    echo '{
-    "kind": "bgpPeer",
-    "spec": {"asNumber": "{{ item.as }}"},
-    "apiVersion": "v1",
-    "metadata": {"node": "{{ inventory_hostname }}", "scope": "node", "peerIP": "{{ item.router_id }}"}
-    }'
-    | {{ bin_dir }}/calicoctl create --skip-exists -f -
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  with_items: "{{ peers|default([]) }}"
-  when: peer_with_router|default(false) and inventory_hostname in groups['k8s-cluster']
-
-- name: Calico | Configure peering with route reflectors
-  shell: >
-    echo '{
-    "kind": "bgpPeer",
-    "spec": {"asNumber": "{{ local_as | default(global_as_num)}}"},
-    "apiVersion": "v1",
-    "metadata": {"node": "{{ inventory_hostname }}",
-                 "scope": "node",
-                 "peerIP": "{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(hostvars[item]["ansible_default_ipv4"]["address"]) }}"}
-    }'
-    | {{ bin_dir }}/calicoctl create --skip-exists -f -
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  with_items: "{{ groups['calico-rr'] | default([]) }}"
-  when: (peer_with_calico_rr|default(false) and inventory_hostname in groups['k8s-cluster']
-         and hostvars[item]['cluster_id'] == cluster_id)
-
-- name: Calico | Create calico manifests
-  template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
-  with_items:
-    - {name: calico-config, file: calico-config.yml, type: cm}
-    - {name: calico-node, file: calico-node.yml, type: ds}
-    - {name: calico, file: calico-node-sa.yml, type: sa}
-    - {name: calico, file: calico-cr.yml, type: clusterrole}
-    - {name: calico, file: calico-crb.yml, type: clusterrolebinding}
-  register: calico_node_manifests
-  when:
-    - inventory_hostname in groups['kube-master']
+- include_tasks: check.yml
+
+- include_tasks: pre.yml
+
+- include_tasks: upgrade.yml
+  when:
+    - calico_upgrade_needed
+  run_once: yes
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+- include_tasks: install.yml
roles/network_plugin/calico/tasks/pre.yml (new file, 16 lines)
---
- name: Calico | Disable calico-node service if it exists
  service:
    name: calico-node
    state: stopped
    enabled: no
  failed_when: false

- name: Calico | Get kubelet hostname
  shell: >-
    {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
    | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1
  register: calico_kubelet_name
  delegate_to: "{{ groups['kube-master'][0] }}"
  when:
    - "cloud_provider is defined"
roles/network_plugin/calico/tasks/upgrade.yml (new file, 26 lines)
---
- name: "Download calico-upgrade tool (force version)"
  get_url:
    url: "https://github.com/projectcalico/calico-upgrade/releases/download/{{ calico_upgrade_version }}/calico-upgrade"
    dest: "{{ bin_dir }}/calico-upgrade"
    mode: 0755
    owner: root
    group: root
    force: yes

- name: "Create etcdv2 and etcdv3 calicoApiConfig"
  template:
    src: "{{ item }}-store.yml.j2"
    dest: "/etc/calico/{{ item }}.yml"
  with_items:
    - "etcdv2"
    - "etcdv3"

- name: "Test data migration (dry-run)"
  shell: "{{ bin_dir }}/calico-upgrade dry-run --output-dir=/tmp --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
  register: calico_upgrade_test_data
  failed_when: '"Successfully" not in calico_upgrade_test_data.stdout'

- name: "If the test migration succeeded, continue with the real calico data migration"
  shell: "{{ bin_dir }}/calico-upgrade start --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml --output-dir=/tmp/calico_upgrade"
  register: calico_upgrade_migration_data
@@ -77,12 +77,6 @@ spec:
             # Set Felix logging to "info"
             - name: FELIX_LOGSEVERITYSCREEN
               value: "info"
-            # Disable autocreation of pools
-            - name: CALICO_NO_DEFAULT_POOLS
-              value: "true"
-            # Enable libnetwork
-            - name: CALICO_LIBNETWORK_ENABLED
-              value: "true"
             # Set MTU for tunnel device used if ipip is enabled
 {% if calico_mtu is defined %}
             - name: FELIX_IPINIPMTU
@@ -158,6 +152,8 @@ spec:
               readOnly: true
             - mountPath: /var/run/calico
               name: var-run-calico
+            - mountPath: /var/lib/calico
+              name: var-lib-calico
               readOnly: false
             - mountPath: /calico-secrets
               name: etcd-certs
@@ -169,6 +165,9 @@ spec:
         - name: var-run-calico
           hostPath:
             path: /var/run/calico
+        - name: var-lib-calico
+          hostPath:
+            path: /var/lib/calico
         # Used to install CNI.
         - name: cni-bin-dir
           hostPath:
@@ -183,5 +182,4 @@ spec:
   updateStrategy:
     rollingUpdate:
       maxUnavailable: {{ serial | default('20%') }}
     type: RollingUpdate
-
@@ -3,7 +3,7 @@
   "cniVersion":"0.3.1",
   "plugins":[
     {
 {% if cloud_provider is defined %}
       "nodename": "{{ calico_kubelet_name.stdout }}",
 {% else %}
       "nodename": "{{ calico_baremetal_nodename }}",
@@ -19,14 +19,14 @@
         "assign_ipv4": "true",
         "ipv4_pools": ["{{ kube_pods_subnet }}"]
       },
 {% if enable_network_policy %}
       "policy": {
         "type": "k8s"
       },
 {%- endif %}
 {% if calico_mtu is defined and calico_mtu is number %}
       "mtu": {{ calico_mtu }},
 {%- endif %}
       "kubernetes": {
         "kubeconfig": "{{ kube_config_dir }}/node-kubeconfig.yaml"
       }
@@ -0,0 +1,9 @@
apiVersion: v1
kind: calicoApiConfig
metadata:
spec:
  datastoreType: "etcdv2"
  etcdEndpoints: "{{ etcd_access_addresses }}"
  etcdKeyFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
  etcdCertFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
  etcdCACertFile: "{{ etcd_cert_dir }}/ca.pem"
@@ -0,0 +1,9 @@
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
metadata:
spec:
  datastoreType: "etcdv3"
  etcdEndpoints: "{{ etcd_access_addresses }}"
  etcdKeyFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
  etcdCertFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
  etcdCACertFile: "{{ etcd_cert_dir }}/ca.pem"
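These apiconfig files are passed to calico-upgrade via --apiconfigv1/--apiconfigv3, and calicoctl can read the same kind of file through its standard --config option. A hedged, illustrative one-off task; it assumes calicoctl on the host can reach /etc/calico and the etcdv3 datastore:

```
# Sketch only: list the migrated v3 IP pools using the generated apiconfig.
- name: Calico | List IP pools from the etcdv3 datastore (illustrative)
  shell: "{{ bin_dir }}/calicoctl get ippool -o wide --config /etc/calico/etcdv3.yml"
  run_once: true
  delegate_to: "{{ groups['kube-master'][0] }}"
```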
@@ -28,6 +28,9 @@ spec:
         - name: lib-modules
           hostPath:
             path: /lib/modules
+        - name: var-lib-calico
+          hostPath:
+            path: /var/lib/calico
         - name: var-run-calico
           hostPath:
             path: /var/run/calico
@@ -202,6 +205,9 @@ spec:
           - mountPath: /var/run/calico
             name: var-run-calico
             readOnly: false
+          - mountPath: /var/lib/calico
+            name: var-lib-calico
+            readOnly: false
           - name: "canal-certs"
             mountPath: "{{ canal_cert_dir }}"
             readOnly: true
@@ -136,6 +136,7 @@
     - "{{ bin_dir }}/kubeadm"
     - "{{ bin_dir }}/helm"
     - "{{ bin_dir }}/calicoctl"
+    - "{{ bin_dir }}/calico-upgrade"
     - "{{ bin_dir }}/weave"
     - /var/lib/rkt
     - /etc/vault
@@ -78,10 +78,19 @@
     - { role: kubernetes/master, tags: master }
     - { role: kubernetes/client, tags: client }
     - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
-    - { role: network_plugin, tags: network }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
   environment: "{{proxy_env}}"

+#Upgrade calico on all masters and nodes
+- hosts: kube-master:kube-node
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  serial: "{{ serial | default('20%') }}"
+  roles:
+    - { role: kubespray-defaults}
+    - { role: network_plugin, tags: network }
+    - { role: kubernetes-apps/network_plugin, tags: network }
+    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
+
 #Finally handle worker upgrades, based on given batch size
 - hosts: kube-node:!kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -90,7 +99,6 @@
     - { role: kubespray-defaults}
     - { role: upgrade/pre-upgrade, tags: pre-upgrade }
     - { role: kubernetes/node, tags: node }
-    - { role: network_plugin, tags: network }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
     - { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
     - { role: kubespray-defaults}
@@ -102,14 +110,6 @@
     - { role: kubespray-defaults}
     - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }

-- hosts: kube-master
-  any_errors_fatal: true
-  roles:
-    - { role: kubespray-defaults}
-    - { role: kubernetes-apps/network_plugin, tags: network }
-    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
-    - { role: kubernetes/client, tags: client }
-
 - hosts: calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
@@ -127,4 +127,4 @@
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes-apps, tags: apps }