Compare commits
78 commits
4661e7db01
ba1d3dcddc
e7f8d5a987
26183c2523
0f7b9363f9
b0b569615e
65aa9213d4
44d1f83ee9
b19d109a12
4e52da6a35
c1a686bf47
eb8dd77ab6
cd46286523
e12850be55
5e4f3cabf1
df00b1d69d
d74dcfd3b8
c1c720422f
bac71fa7cb
ac1aa4d591
c22915a08c
0ea43289e2
01e527abf1
704a054064
8c693e8739
9ecbf75cb4
591a51aa75
76a1697cf1
1216a0d52d
f4d3a4a5ad
3c8ad073cd
53b9388b82
f26cc9f75b
5563ed8084
a02e9206fe
e7cc686beb
5d4fcbc5a1
ba348c9a00
b0f2471f0e
fbdc2b3e20
557139a8cf
daea9f3d21
ac23d89a1a
3292887cae
c7658c0256
716a66e5d3
efd138e752
40857b9859
176df83e02
60b405a7b7
d48a4bbc85
10b08d8840
189ce380bd
5f06864582
3ad248b007
754a54adfc
960844d87b
6bde4e3fb3
3725c80a71
d94f32c160
6b184905e6
782c3dc1c4
f6b806e971
dee0594d74
f8b15a714c
d8ab76aa04
8a5139e54c
1727b3501f
4ed05cf655
8105cd7fbe
cf84a6bd3b
b80f612d29
5e06ee6ea6
4de5a070e1
b198cd23d0
74e8f58c57
803f89e82b
a652a3b3b5
125 changed files with 995 additions and 3722 deletions
@@ -42,12 +42,10 @@ packet_centos7-flannel-containerd-addons-ha:
variables:
MITOGEN_ENABLE: "true"

packet_centos7-crio:
packet_centos8-crio:
extends: .packet_pr
stage: deploy-part2
when: on_success
variables:
MITOGEN_ENABLE: "true"

packet_ubuntu18-crio:
extends: .packet_pr
@@ -1,7 +1,7 @@
# Use imutable image tags rather than mutable tags (like ubuntu:18.04)
FROM ubuntu:bionic-20200807

ENV KUBE_VERSION=v1.19.7
ENV KUBE_VERSION=v1.19.9

RUN mkdir /kubespray
WORKDIR /kubespray
README.md (12)

@@ -108,7 +108,7 @@ vagrant up
- **CentOS/RHEL** 7, 8 (experimental: see [centos 8 notes](docs/centos8.md))
- **Fedora** 32, 33
- **Fedora CoreOS** (experimental: see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 42.3/Tumbleweed
- **openSUSE** Leap 15.x/Tumbleweed
- **Oracle Linux** 7, 8 (experimental: [centos 8 notes](docs/centos8.md) apply)

Note: Upstart/SysV init based OS types are not supported.

@@ -116,20 +116,20 @@ Note: Upstart/SysV init based OS types are not supported.
## Supported Components

- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.19.7
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.19.9
- [etcd](https://github.com/coreos/etcd) v3.4.13
- [docker](https://www.docker.com/) v19.03 (see note)
- [containerd](https://containerd.io/) v1.3.9
- [cri-o](http://cri-o.io/) v1.19 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) v0.9.0
- [calico](https://github.com/projectcalico/calico) v3.16.5
- [calico](https://github.com/projectcalico/calico) v3.16.9
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.8.6
- [cilium](https://github.com/cilium/cilium) v1.8.8
- [flanneld](https://github.com/coreos/flannel) v0.13.0
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.5.2
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.6.1
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.1.1
- [multus](https://github.com/intel/multus-cni) v3.6.0
- [multus](https://github.com/intel/multus-cni) v3.7.0
- [ovn4nfv](https://github.com/opnfv/ovn4nfv-k8s-plugin) v1.1.0
- [weave](https://github.com/weaveworks/weave) v2.7.0
- Application
Vagrantfile (vendored, 2)

@@ -28,7 +28,7 @@ SUPPORTED_OS = {
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
"fedora32" => {box: "fedora/32-cloud-base", user: "vagrant"},
"fedora33" => {box: "fedora/33-cloud-base", user: "vagrant"},
"opensuse" => {box: "bento/opensuse-leap-15.1", user: "vagrant"},
"opensuse" => {box: "bento/opensuse-leap-15.2", user: "vagrant"},
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
cluster.yml (14)

@@ -4,6 +4,7 @@
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

@@ -12,6 +13,7 @@
strategy: linear
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
gather_facts: false
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bootstrap-os, tags: bootstrap-os}

@@ -23,6 +25,7 @@
- hosts: k8s-cluster:etcd
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, tags: preinstall }

@@ -32,6 +35,7 @@
- hosts: etcd
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd

@@ -44,6 +48,7 @@
- hosts: k8s-cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd

@@ -56,6 +61,7 @@
- hosts: k8s-cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/node, tags: node }

@@ -63,6 +69,7 @@
- hosts: kube-master
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/master, tags: master }

@@ -72,6 +79,7 @@
- hosts: k8s-cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/kubeadm, tags: kubeadm}

@@ -81,6 +89,7 @@
- hosts: calico-rr
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }

@@ -88,14 +97,15 @@
- hosts: kube-master[0]
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }

- hosts: kube-master
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }

@@ -107,6 +117,7 @@
- hosts: kube-master
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes-apps, tags: apps }

@@ -114,6 +125,7 @@
- hosts: k8s-cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
@@ -42,6 +42,10 @@ The type of the vm. Supported values are `standard` or `vmss`. If vm is type of
The name of the virtual network your instances are in, can be retrieved via `az network vnet list`

### azure\_vnet\_resource\_group

The name of the resource group that contains the vnet.

### azure\_subnet\_name

The name of the subnet your instances are in, can be retrieved via `az network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME`

@@ -50,6 +54,18 @@ The name of the subnet your instances are in, can be retrieved via `az network v
The name of the network security group your instances are in, can be retrieved via `az network nsg list`

### azure\_security\_group\_resource\_group

The name of the resource group that contains the network security group. Defaults to `azure_vnet_resource_group`

### azure\_route\_table\_name

The name of the route table used with your instances.

### azure\_route\_table\_resource\_group

The name of the resource group that contains the route table. Defaults to `azure_vnet_resource_group`

### azure\_aad\_client\_id + azure\_aad\_client\_secret

These will have to be generated first:
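As a rough illustration of the variables documented above, a group_vars entry might look like the sketch below; every value is a placeholder, to be replaced with the names returned by the `az` commands mentioned in this section.

# Hypothetical values for illustration only
azure_vnet_name: "example-vnet"
azure_vnet_resource_group: "example-network-rg"
azure_subnet_name: "example-k8s-subnet"
azure_security_group_name: "example-k8s-nsg"
azure_security_group_resource_group: "example-network-rg"  # defaults to azure_vnet_resource_group
azure_route_table_name: "example-k8s-routes"
azure_route_table_resource_group: "example-network-rg"  # defaults to azure_vnet_resource_group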
@@ -24,8 +24,8 @@ ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x
| OS / CNI | calico | canal | cilium | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora32 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@@ -28,14 +28,13 @@ cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_ar
crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
# If using Calico
calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
# If using Calico with kdd
calico_crds_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_version }}.tar.gz"

# CentOS/Redhat
## Docker
## Docker / Containerd
docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
## Containerd
extras_rh_repo_base_url: "{{ yum_repo }}/centos/{{ ansible_distribution_major_version }}/extras/$basearch"
extras_rh_repo_gpgkey: "{{ yum_repo }}/containerd/gpg"

# Fedora
## Docker
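For illustration, the `files_repo` and `yum_repo` anchors used in the URLs above could point at an internal mirror, as in the hedged sketch below; the hostnames are placeholders.

# Hypothetical internal mirror endpoints (placeholders)
files_repo: "https://files.mirror.example.internal/kubespray"
yum_repo: "http://yum.mirror.example.internal"
# With these values, crictl_download_url above would expand to something like
# https://files.mirror.example.internal/kubespray/kubernetes/cri-tools/crictl-<version>-linux-<arch>.tar.gz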
@@ -1,4 +1,4 @@
# openSUSE Leap 15.0 and Tumbleweed
# openSUSE Leap 15.2 and Tumbleweed

openSUSE Leap installation Notes:
@@ -284,20 +284,6 @@ follows:
* kube-apiserver, kube-scheduler, and kube-controller-manager
* Add-ons (such as KubeDNS)

## Upgrade considerations

Kubespray supports rotating certificates used for etcd and Kubernetes
components, but some manual steps may be required. If you have a pod that
requires use of a service token and is deployed in a namespace other than
`kube-system`, you will need to manually delete the affected pods after
rotating certificates. This is because all service account tokens are dependent
on the apiserver token that is used to generate them. When the certificate
rotates, all service account tokens must be rotated as well. During the
kubernetes-apps/rotate_tokens role, only pods in kube-system are destroyed and
recreated. All other invalidated service account tokens are cleaned up
automatically, but other pods are not deleted out of an abundance of caution
for impact to user deployed pods.

### Component-based upgrades

A deployer may want to upgrade specific components in order to minimize risk
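The passage above describes deleting pods outside `kube-system` by hand after certificates are rotated; one hedged way to script that is the Ansible task below, where the namespace is a placeholder and `bin_dir` is assumed to be the kubectl install location used elsewhere in these group_vars.

# Minimal sketch, not part of the diff: bounce pods in one user namespace
# so they pick up freshly issued service account tokens.
- name: Recreate pods that still hold invalidated service account tokens (example namespace)
  command: "{{ bin_dir }}/kubectl delete pods --all --namespace=my-app"
  delegate_to: "{{ groups['kube-master'] | first }}"
  run_once: true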
facts.yml (25)

@@ -7,13 +7,20 @@
setup:
gather_subset: '!all'

- name: Gather necessary facts
# filter match the following variables:
# ansible_default_ipv4
# ansible_default_ipv6
# ansible_all_ipv4_addresses
# ansible_all_ipv6_addresses
- name: Gather necessary facts (network)
setup:
gather_subset: '!all,!min,network,hardware'
filter: "{{ item }}"
loop:
- ansible_distribution_major_version
- ansible_default_ipv4
- ansible_all_ipv4_addresses
- ansible_memtotal_mb
- ansible_swaptotal_mb
gather_subset: '!all,!min,network'
filter: "ansible_*_ipv[46]*"

# filter match the following variables:
# ansible_memtotal_mb
# ansible_swaptotal_mb
- name: Gather necessary facts (hardware)
setup:
gather_subset: '!all,!min,hardware'
filter: "ansible_*total_mb"
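A minimal, self-contained sketch of the same filtered fact gathering introduced above; the host pattern and the debug step are only for illustration.

- hosts: all
  gather_facts: false
  tasks:
    - name: Gather only the IPv4/IPv6 related facts
      setup:
        gather_subset: '!all,!min,network'
        filter: "ansible_*_ipv[46]*"

    - name: Show the default IPv4 address that was collected
      debug:
        var: ansible_default_ipv4.address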
@@ -27,6 +27,11 @@ bin_dir: /usr/local/bin
# valid options are "nginx" or "haproxy"
# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy"

## If the cilium is going to be used in strict mode, we can use the
## localhost connection and not use the external LB. If this parameter is
## not specified, the first node to connect to kubeapi will be used.
# use_localhost_as_kubeapi_loadbalancer: true

## Local loadbalancer should use this port
## And must be set port 6443
loadbalancer_apiserver_port: 6443
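If the new localhost load-balancer option above fits a given topology, an inventory override might look like this sketch (illustrative only, not a default).

# Example override only
use_localhost_as_kubeapi_loadbalancer: true
loadbalancer_apiserver_port: 6443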
@@ -10,9 +10,11 @@
# azure_location:
# azure_subnet_name:
# azure_security_group_name:
# azure_security_group_resource_group:
# azure_vnet_name:
# azure_vnet_resource_group:
# azure_route_table_name:
# azure_route_table_resource_group:
# supported values are 'standard' or 'vmss'
# azure_vmtype: standard
@@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.19.7
kube_version: v1.19.9

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)

@@ -310,5 +310,6 @@ persistent_volumes_enabled: false
## Amount of time to retain events. (default 1h0m0s)
event_ttl_duration: "1h0m0s"
## Force regeneration of kubernetes control plane certificates without the need of bumping the cluster version
force_certificate_regeneration: false

## Automatically renew K8S control plane certificates on first Monday of each month
auto_renew_certificates: false
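Taken together, the hunks above bump the default `kube_version` and introduce `auto_renew_certificates`; a group_vars override enabling the new renewal behaviour could look like the sketch below (illustrative only).

# Illustrative overrides; v1.19.9 is simply the new default shown above
kube_version: v1.19.9
auto_renew_certificates: true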
@@ -32,14 +32,16 @@
# [Optional] Calico: If using Calico network plugin
# calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore
# calico_crds_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_version }}.tar.gz"

## CentOS/Redhat
### Docker
### For EL7, base and extras repo must be available, for EL8, baseos and appstream
### By default we enable those repo automatically
# rhel_enable_repos: false
### Docker / Containerd
# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
### Containerd
# extras_rh_repo_base_url: "{{ yum_repo }}/centos/$releasever/extras/$basearch"
# extras_rh_repo_gpgkey: "{{ yum_repo }}/containerd/gpg"

## Fedora
### Docker
@@ -10,6 +10,7 @@
# node6 ansible_host=95.54.0.17 # ip=10.3.0.6 etcd_member_name=etcd6

# ## configure a bastion host if your nodes are not directly reachable
# [bastion]
# bastion ansible_host=x.x.x.x ansible_user=some_user

[kube-master]
@@ -4,22 +4,27 @@
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

- hosts: "{{ groups['etcd'] | first }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: recover_control_plane/etcd }

- hosts: "{{ groups['kube-master'] | first }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: recover_control_plane/master }

- include: cluster.yml

- hosts: "{{ groups['kube-master'] }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: recover_control_plane/post-recover }
@@ -4,6 +4,7 @@
- hosts: "{{ node | default('etcd:k8s-cluster:calico-rr') }}"
gather_facts: no
environment: "{{ proxy_disable_env }}"
vars_prompt:
name: "delete_nodes_confirmation"
prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."

@@ -18,6 +19,7 @@
- hosts: kube-master[0]
gather_facts: no
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bootstrap-os, tags: bootstrap-os }

@@ -25,6 +27,7 @@
- hosts: "{{ node | default('kube-node') }}"
gather_facts: no
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
- { role: bootstrap-os, tags: bootstrap-os, when: reset_nodes|default(True)|bool }

@@ -34,6 +37,7 @@
# Currently cannot remove first master or etcd
- hosts: "{{ node | default('kube-master[1:]:etcd[1:]') }}"
gather_facts: no
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
- { role: bootstrap-os, tags: bootstrap-os, when: reset_nodes|default(True)|bool }
@@ -1,5 +1,6 @@
ansible==2.9.16
jinja2==2.11.1
ansible==2.9.18
cryptography==2.8
jinja2==2.11.3
netaddr==0.7.19
pbr==5.4.4
jmespath==0.9.5
@@ -4,6 +4,7 @@
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

@@ -25,6 +26,7 @@
msg: "Reset confirmation failed"
when: reset_confirmation != "yes"

environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: reset, tags: reset }
@@ -1,7 +1,8 @@
---
- name: set bastion host IP
- name: set bastion host IP and port
set_fact:
bastion_ip: "{{ hostvars[groups['bastion'][0]]['ansible_host'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_host']) }}"
bastion_port: "{{ hostvars[groups['bastion'][0]]['ansible_port'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_port']) | d(22) }}"
delegate_to: localhost
connection: local
@@ -15,4 +15,4 @@ Host {{ bastion_ip }}
ControlPersist 5m

Host {{ vars['hosts'] }}
ProxyCommand ssh -F /dev/null -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}
ProxyCommand ssh -F /dev/null -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -p {{ bastion_port }} {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}
@ -22,7 +22,7 @@
|
|||
dest: /etc/yum.repos.d/public-yum-ol7.repo
|
||||
when:
|
||||
- use_oracle_public_repo|default(true)
|
||||
- '"Oracle" in os_release.stdout'
|
||||
- '''ID="ol"'' in os_release.stdout_lines'
|
||||
- (ansible_distribution_version | float) < 7.6
|
||||
environment: "{{ proxy_env }}"
|
||||
|
||||
|
@ -38,7 +38,7 @@
|
|||
- ol7_developer_EPEL
|
||||
when:
|
||||
- use_oracle_public_repo|default(true)
|
||||
- '"Oracle" in os_release.stdout'
|
||||
- '''ID="ol"'' in os_release.stdout_lines'
|
||||
- (ansible_distribution_version | float) < 7.6
|
||||
|
||||
- name: Enable Oracle Linux repo
|
||||
|
@ -51,7 +51,8 @@
|
|||
- { option: "enabled", value: "1" }
|
||||
- { option: "baseurl", value: "http://yum.oracle.com/repo/OracleLinux/OL{{ ansible_distribution_major_version }}/addons/x86_64/" }
|
||||
when:
|
||||
- '"Oracle" in os_release.stdout'
|
||||
- use_oracle_public_repo|default(true)
|
||||
- '''ID="ol"'' in os_release.stdout_lines'
|
||||
- (ansible_distribution_version | float) >= 7.6
|
||||
|
||||
- name: Install EPEL for Oracle Linux repo package
|
||||
|
@ -59,7 +60,8 @@
|
|||
name: "oracle-epel-release-el{{ ansible_distribution_major_version }}"
|
||||
state: present
|
||||
when:
|
||||
- '"Oracle" in os_release.stdout'
|
||||
- use_oracle_public_repo|default(true)
|
||||
- '''ID="ol"'' in os_release.stdout_lines'
|
||||
- (ansible_distribution_version | float) >= 7.6
|
||||
|
||||
# CentOS ships with python installed
|
||||
|
@ -67,6 +69,9 @@
|
|||
- name: Check presence of fastestmirror.conf
|
||||
stat:
|
||||
path: /etc/yum/pluginconf.d/fastestmirror.conf
|
||||
get_attributes: no
|
||||
get_checksum: no
|
||||
get_mime: no
|
||||
register: fastestmirror
|
||||
|
||||
# the fastestmirror plugin can actually slow down Ansible deployments
|
||||
|
|
|
@ -51,20 +51,20 @@
|
|||
# This command should always run, even in check mode
|
||||
check_mode: false
|
||||
when:
|
||||
- '"bionic" in os_release.stdout'
|
||||
- '''UBUNTU_CODENAME=bionic'' in os_release.stdout_lines'
|
||||
|
||||
- name: Change Network Name Resolution configuration
|
||||
raw: sed -i 's/^DNSSEC=yes/DNSSEC=allow-downgrade/g' /etc/systemd/resolved.conf
|
||||
become: true
|
||||
when:
|
||||
- '"bionic" in os_release.stdout'
|
||||
- '''UBUNTU_CODENAME=bionic'' in os_release.stdout_lines'
|
||||
- need_dnssec_allow_downgrade.rc
|
||||
|
||||
- name: Restart systemd-resolved service
|
||||
raw: systemctl restart systemd-resolved
|
||||
become: true
|
||||
when:
|
||||
- '"bionic" in os_release.stdout'
|
||||
- '''UBUNTU_CODENAME=bionic'' in os_release.stdout_lines'
|
||||
- need_dnssec_allow_downgrade.rc
|
||||
|
||||
- name: Install python3
|
||||
|
|
|
@@ -4,6 +4,9 @@
- name: Check that /etc/sysconfig/proxy file exists
stat:
path: /etc/sysconfig/proxy
get_attributes: no
get_checksum: no
get_mime: no
register: stat_result

- name: Create the /etc/sysconfig/proxy empty file
@ -60,9 +60,34 @@
|
|||
- rh_subscription_username is defined
|
||||
- rh_subscription_status.changed
|
||||
|
||||
# container-selinux is in extras repo
|
||||
- name: Enable RHEL 7 repos
|
||||
rhsm_repository:
|
||||
name:
|
||||
- "rhel-7-server-rpms"
|
||||
- "rhel-7-server-extras-rpms"
|
||||
state: enabled
|
||||
when:
|
||||
- rhel_enable_repos | default(True)
|
||||
- ansible_distribution_major_version == "7"
|
||||
|
||||
# container-selinux is in appstream repo
|
||||
- name: Enable RHEL 8 repos
|
||||
rhsm_repository:
|
||||
name:
|
||||
- "rhel-8-for-*-baseos-rpms"
|
||||
- "rhel-8-for-*-appstream-rpms"
|
||||
state: enabled
|
||||
when:
|
||||
- rhel_enable_repos | default(True)
|
||||
- ansible_distribution_major_version == "8"
|
||||
|
||||
- name: Check presence of fastestmirror.conf
|
||||
stat:
|
||||
path: /etc/yum/pluginconf.d/fastestmirror.conf
|
||||
get_attributes: no
|
||||
get_checksum: no
|
||||
get_mime: no
|
||||
register: fastestmirror
|
||||
|
||||
# the fastestmirror plugin can actually slow down Ansible deployments
|
||||
|
|
|
@@ -7,32 +7,34 @@
check_mode: false

- include_tasks: bootstrap-centos.yml
when: '"CentOS" in os_release.stdout or "Oracle" in os_release.stdout'
when: '''ID="centos"'' in os_release.stdout_lines or ''ID="ol"'' in os_release.stdout_lines'

- include_tasks: bootstrap-redhat.yml
when: '"Red Hat Enterprise Linux" in os_release.stdout'
when: '''ID="rhel"'' in os_release.stdout_lines'

- include_tasks: bootstrap-clearlinux.yml
when: '"Clear Linux OS" in os_release.stdout'
when: '''ID=clear-linux-os'' in os_release.stdout_lines'

# Fedora CoreOS
- include_tasks: bootstrap-fedora-coreos.yml
when: '"ID=fedora" in os_release.stdout and "VARIANT_ID=coreos" in os_release.stdout'
when:
- '''ID=fedora'' in os_release.stdout_lines'
- '''VARIANT_ID=coreos'' in os_release.stdout_lines'

- include_tasks: bootstrap-flatcar.yml
when:
- '"Flatcar" in os_release.stdout'
- '"ID=fedora" not in os_release.stdout'
when: '''ID=flatcar'' in os_release.stdout_lines'

- include_tasks: bootstrap-debian.yml
when: '"Debian" in os_release.stdout or "Ubuntu" in os_release.stdout'
when: '''ID=debian'' in os_release.stdout_lines or ''ID=ubuntu'' in os_release.stdout_lines'

# Fedora "classic"
- include_tasks: bootstrap-fedora.yml
when:
- '"Fedora" in os_release.stdout'
- '"VARIANT_ID=coreos" not in os_release.stdout'
- '''ID=fedora'' in os_release.stdout_lines'
- '''VARIANT_ID=coreos'' not in os_release.stdout_lines'

- include_tasks: bootstrap-opensuse.yml
when: '"openSUSE" in os_release.stdout'
when: '''ID="opensuse-leap"'' in os_release.stdout_lines or ''ID="opensuse-tumbleweed"'' in os_release.stdout_lines'

- name: Create remote_tmp for it is used by another module
file:
@ -34,9 +34,6 @@ containerd_repo_key_info:
|
|||
containerd_repo_info:
|
||||
repos:
|
||||
|
||||
extras_rh_repo_base_url: "http://mirror.centos.org/centos/{{ ansible_distribution_major_version }}/extras/$basearch/"
|
||||
extras_rh_repo_gpgkey: "http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-7"
|
||||
|
||||
# Ubuntu docker-ce repo
|
||||
containerd_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu"
|
||||
containerd_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg'
|
||||
|
@ -68,6 +65,7 @@ containerd_default_runtime:
|
|||
# type: io.containerd.kata.v2
|
||||
# engine: ""
|
||||
# root: ""
|
||||
# privileged_without_host_devices: true
|
||||
containerd_runtimes: []
|
||||
|
||||
containerd_untrusted_runtime_type: ''
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
---
|
||||
- name: ensure containerd repository public key is installed
|
||||
action: "{{ containerd_repo_key_info.pkg_key }}"
|
||||
args:
|
||||
apt_key:
|
||||
id: "{{ item }}"
|
||||
url: "{{ containerd_repo_key_info.url }}"
|
||||
state: present
|
||||
|
@ -11,18 +10,14 @@
|
|||
delay: "{{ retry_stagger | d(3) }}"
|
||||
with_items: "{{ containerd_repo_key_info.repo_keys }}"
|
||||
environment: "{{ proxy_env }}"
|
||||
when:
|
||||
- ansible_os_family in ['Ubuntu', 'Debian']
|
||||
when: ansible_pkg_mgr == 'apt'
|
||||
|
||||
- name: ensure containerd repository is enabled
|
||||
action: "{{ containerd_repo_info.pkg_repo }}"
|
||||
args:
|
||||
apt_repository:
|
||||
repo: "{{ item }}"
|
||||
state: present
|
||||
with_items: "{{ containerd_repo_info.repos }}"
|
||||
when:
|
||||
- ansible_os_family in ['Ubuntu', 'Debian']
|
||||
- containerd_repo_info.repos|length > 0
|
||||
when: ansible_pkg_mgr == 'apt'
|
||||
|
||||
- name: Configure containerd repository on Fedora
|
||||
template:
|
||||
|
@ -30,29 +25,8 @@
|
|||
dest: "{{ yum_repo_dir }}/containerd.repo"
|
||||
when: ansible_distribution == "Fedora"
|
||||
|
||||
- name: Configure containerd repository on RedHat/CentOS
|
||||
- name: Configure containerd repository on RedHat/OracleLinux/CentOS
|
||||
template:
|
||||
src: "rh_containerd.repo.j2"
|
||||
dest: "{{ yum_repo_dir }}/containerd.repo"
|
||||
when: ansible_distribution in ["CentOS","RedHat"]
|
||||
|
||||
- name: check if container-selinux is available
|
||||
yum:
|
||||
list: "container-selinux"
|
||||
register: yum_result
|
||||
when: ansible_distribution in ["CentOS","RedHat"]
|
||||
|
||||
- name: Configure extras repository on RedHat/CentOS if container-selinux is not available in current repos
|
||||
yum_repository:
|
||||
name: extras
|
||||
description: "CentOS-{{ ansible_distribution_major_version }} - Extras"
|
||||
state: present
|
||||
baseurl: "{{ extras_rh_repo_base_url }}"
|
||||
file: "extras"
|
||||
gpgcheck: "{{ 'yes' if extras_rh_repo_gpgkey else 'no' }}"
|
||||
gpgkey: "{{ extras_rh_repo_gpgkey }}"
|
||||
keepcache: "{{ containerd_rpm_keepcache | default('1') }}"
|
||||
proxy: " {{ http_proxy | default('_none_') }}"
|
||||
when:
|
||||
- ansible_distribution in ["CentOS","RedHat"]
|
||||
- yum_result.results | length == 0
|
||||
when: ansible_distribution in ["CentOS", "OracleLinux", "RedHat"]
|
||||
|
|
|
@ -2,6 +2,9 @@
|
|||
- name: check if fedora coreos
|
||||
stat:
|
||||
path: /run/ostree-booted
|
||||
get_attributes: no
|
||||
get_checksum: no
|
||||
get_mime: no
|
||||
register: ostree
|
||||
|
||||
- name: set is_ostree
|
||||
|
@ -13,7 +16,7 @@
|
|||
fail:
|
||||
msg: "{{ ansible_distribution }} is not supported by containerd."
|
||||
when:
|
||||
- not ansible_distribution in ["CentOS","RedHat", "Ubuntu", "Debian", "Fedora"]
|
||||
- not ansible_distribution in ["CentOS", "OracleLinux", "RedHat", "Ubuntu", "Debian", "Fedora"]
|
||||
|
||||
- name: gather os specific variables
|
||||
include_vars: "{{ item }}"
|
||||
|
@ -81,28 +84,33 @@
|
|||
|
||||
# This is required to ensure any apt upgrade will not break kubernetes
|
||||
- name: Set containerd pin priority to apt_preferences on Debian family
|
||||
template:
|
||||
src: "apt_preferences.d/debian_containerd.j2"
|
||||
copy:
|
||||
content: |
|
||||
Package: {{ containerd_package }}
|
||||
Pin: version {{ containerd_version }}*
|
||||
Pin-Priority: 1001
|
||||
dest: "/etc/apt/preferences.d/containerd"
|
||||
owner: "root"
|
||||
mode: 0644
|
||||
when:
|
||||
- ansible_os_family in ['Ubuntu', 'Debian']
|
||||
- not is_ostree
|
||||
when: ansible_pkg_mgr == 'apt'
|
||||
|
||||
- name: ensure containerd packages are installed
|
||||
action: "{{ containerd_package_info.pkg_mgr }}"
|
||||
args:
|
||||
pkg: "{{ item.name }}"
|
||||
force: "{{ item.force | default(omit) }}"
|
||||
package:
|
||||
name: "{{ containerd_package_info.pkgs }}"
|
||||
state: present
|
||||
update_cache: "{{ omit if ansible_distribution == 'Fedora' else True }}"
|
||||
enablerepo: "{{ item.repo | default(omit) }}"
|
||||
module_defaults:
|
||||
apt:
|
||||
update_cache: true
|
||||
dnf:
|
||||
enablerepo: "{{ containerd_package_info.enablerepo | default(omit) }}"
|
||||
yum:
|
||||
enablerepo: "{{ containerd_package_info.enablerepo | default(omit) }}"
|
||||
zypper:
|
||||
update_cache: true
|
||||
register: containerd_task_result
|
||||
until: containerd_task_result is succeeded
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | d(3) }}"
|
||||
with_items: "{{ containerd_package_info.pkgs }}"
|
||||
notify: restart containerd
|
||||
when:
|
||||
- not is_ostree
|
||||
|
@ -110,3 +118,14 @@
|
|||
|
||||
- include_role:
|
||||
name: container-engine/crictl
|
||||
|
||||
# you can sometimes end up in a state where everything is installed
|
||||
# but containerd was not started / enabled
|
||||
- name: flush handlers
|
||||
meta: flush_handlers
|
||||
|
||||
- name: ensure containerd is started and enabled
|
||||
service:
|
||||
name: containerd
|
||||
enabled: yes
|
||||
state: started
|
||||
|
|
|
@@ -1,3 +0,0 @@
Package: {{ containerd_package }}
Pin: version {{ containerd_version }}*
Pin-Priority: 1001
@ -42,6 +42,7 @@ disabled_plugins = ["restart"]
|
|||
runtime_type = "{{ containerd_default_runtime.type }}"
|
||||
runtime_engine = "{{ containerd_default_runtime.engine }}"
|
||||
runtime_root = "{{ containerd_default_runtime.root }}"
|
||||
privileged_without_host_devices = {{ containerd_default_runtime.privileged_without_host_devices|default(false)|lower }}
|
||||
|
||||
{% if kata_containers_enabled %}
|
||||
[plugins.cri.containerd.runtimes.kata-qemu]
|
||||
|
@ -55,6 +56,7 @@ disabled_plugins = ["restart"]
|
|||
runtime_type = "{{ runtime.type }}"
|
||||
runtime_engine = "{{ runtime.engine }}"
|
||||
runtime_root = "{{ runtime.root }}"
|
||||
privileged_without_host_devices = {{ runtime.privileged_without_host_devices|default(false)|lower }}
|
||||
{% endfor %}
|
||||
|
||||
[plugins.cri.containerd.untrusted_workload_runtime]
|
||||
|
|
|
@ -1,18 +1,14 @@
|
|||
---
|
||||
containerd_package_info:
|
||||
pkg_mgr: apt
|
||||
pkgs:
|
||||
- name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
force: false
|
||||
- "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
|
||||
containerd_repo_key_info:
|
||||
pkg_key: apt_key
|
||||
url: '{{ containerd_debian_repo_gpgkey }}'
|
||||
repo_keys:
|
||||
- '{{ containerd_debian_repo_repokey }}'
|
||||
|
||||
containerd_repo_info:
|
||||
pkg_repo: apt_repository
|
||||
repos:
|
||||
- >
|
||||
deb {{ containerd_debian_repo_base_url }}
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
---
|
||||
containerd_package_info:
|
||||
pkg_mgr: dnf
|
||||
enablerepo: "docker-ce"
|
||||
pkgs:
|
||||
- name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
repo: "docker-ce"
|
||||
- "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
|
|
|
@ -1,14 +1,5 @@
|
|||
---
|
||||
containerd_package_info:
|
||||
pkg_mgr: yum
|
||||
enablerepo: "docker-ce"
|
||||
pkgs:
|
||||
- name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
repo: "docker-ce"
|
||||
|
||||
containerd_repo_key_info:
|
||||
pkg_key: ''
|
||||
repo_keys: []
|
||||
|
||||
containerd_repo_info:
|
||||
pkg_repo: ''
|
||||
repos: []
|
||||
- "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
|
|
|
@ -3,15 +3,5 @@
|
|||
containerd_package: containerd
|
||||
|
||||
containerd_package_info:
|
||||
pkg_mgr: zypper
|
||||
pkgs:
|
||||
- name: "{{ containerd_package }}"
|
||||
state: latest
|
||||
|
||||
containerd_repo_key_info:
|
||||
pkg_key: ''
|
||||
repo_keys: []
|
||||
|
||||
containerd_repo_info:
|
||||
pkg_repo: ''
|
||||
repos: []
|
||||
- "{{ containerd_package }}"
|
||||
|
|
|
@ -1,18 +1,14 @@
|
|||
---
|
||||
containerd_package_info:
|
||||
pkg_mgr: apt
|
||||
pkgs:
|
||||
- name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
force: false
|
||||
- "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
|
||||
containerd_repo_key_info:
|
||||
pkg_key: apt_key
|
||||
url: '{{ containerd_ubuntu_repo_gpgkey }}'
|
||||
repo_keys:
|
||||
- '{{ containerd_ubuntu_repo_repokey }}'
|
||||
|
||||
containerd_repo_info:
|
||||
pkg_repo: apt_repository
|
||||
repos:
|
||||
- >
|
||||
deb {{ containerd_ubuntu_repo_base_url }}
|
||||
|
|
|
@ -2,6 +2,9 @@
|
|||
- name: check if fedora coreos
|
||||
stat:
|
||||
path: /run/ostree-booted
|
||||
get_attributes: no
|
||||
get_checksum: no
|
||||
get_mime: no
|
||||
register: ostree
|
||||
|
||||
- name: set is_ostree
|
||||
|
@ -94,6 +97,9 @@
|
|||
- name: Check if already installed
|
||||
stat:
|
||||
path: "/bin/crio"
|
||||
get_attributes: no
|
||||
get_checksum: no
|
||||
get_mime: no
|
||||
register: need_bootstrap_crio
|
||||
when: is_ostree
|
||||
|
||||
|
@@ -134,15 +140,13 @@
owner: root
mode: 0755

- name: Remove metacopy mount options for older kernels
# metacopy=on is available since 4.19 and was backported to RHEL 4.18 kernel
- name: Set metacopy mount options correctly
ini_file:
dest: /etc/containers/storage.conf
section: storage.options.overlay
option: mountopt
value: "\"nodev\""
when:
- ansible_distribution == "CentOS"
- ansible_distribution_major_version == "7"
value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}'

- name: Create directory registries configs
file:
@ -293,6 +293,7 @@ pinns_path = ""
|
|||
runtime_path = "{{ runtime.path }}"
|
||||
runtime_type = "{{ runtime.type }}"
|
||||
runtime_root = "{{ runtime.root }}"
|
||||
privileged_without_host_devices = {{ runtime.privileged_without_host_devices|default(false)|lower }}
|
||||
{% endfor %}
|
||||
|
||||
# Kata Containers with the Firecracker VMM
|
||||
|
|
|
@@ -8,7 +8,7 @@
template:
src: crictl.yaml.j2
dest: /etc/crictl.yaml
owner: bin
owner: root
mode: 0644

- name: Copy crictl binary from download dir
@ -9,6 +9,9 @@
|
|||
- name: Check if binary exists
|
||||
stat:
|
||||
path: "{{ crun_bin_dir }}/crun"
|
||||
get_attributes: no
|
||||
get_checksum: no
|
||||
get_mime: no
|
||||
register: crun_stat
|
||||
|
||||
# TODO: use download_file.yml
|
||||
|
|
|
@ -28,13 +28,12 @@ docker_rh_repo_gpgkey: 'https://download.docker.com/linux/centos/gpg'
|
|||
# Ubuntu docker-ce repo
|
||||
docker_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu"
|
||||
docker_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg'
|
||||
docker_ubuntu_repo_repokey: '9DC858229FC7DD38854AE2D88D81803C0EBFCD88'
|
||||
# Debian docker-ce repo
|
||||
docker_debian_repo_base_url: "https://download.docker.com/linux/debian"
|
||||
docker_debian_repo_gpgkey: 'https://download.docker.com/linux/debian/gpg'
|
||||
docker_debian_repo_repokey: '9DC858229FC7DD38854AE2D88D81803C0EBFCD88'
|
||||
docker_bin_dir: "/usr/bin"
|
||||
# CentOS/RedHat Extras repo
|
||||
extras_rh_repo_base_url: "http://mirror.centos.org/centos/{{ ansible_distribution_major_version }}/extras/$basearch/"
|
||||
extras_rh_repo_gpgkey: "http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-7"
|
||||
|
||||
# flag to enable/disable docker cleanup
|
||||
docker_orphan_clean_up: false
|
||||
|
|
|
@ -2,6 +2,9 @@
|
|||
- name: check if fedora coreos
|
||||
stat:
|
||||
path: /run/ostree-booted
|
||||
get_attributes: no
|
||||
get_checksum: no
|
||||
get_mime: no
|
||||
register: ostree
|
||||
|
||||
- name: set is_ostree
|
||||
|
@ -54,8 +57,7 @@
|
|||
- import_tasks: pre-upgrade.yml
|
||||
|
||||
- name: ensure docker-ce repository public key is installed
|
||||
action: "{{ docker_repo_key_info.pkg_key }}"
|
||||
args:
|
||||
apt_key:
|
||||
id: "{{ item }}"
|
||||
url: "{{ docker_repo_key_info.url }}"
|
||||
state: present
|
||||
|
@ -65,15 +67,14 @@
|
|||
delay: "{{ retry_stagger | d(3) }}"
|
||||
with_items: "{{ docker_repo_key_info.repo_keys }}"
|
||||
environment: "{{ proxy_env }}"
|
||||
when: not (ansible_os_family in ["Flatcar Container Linux by Kinvolk", "RedHat", "Suse", "ClearLinux"] or is_ostree)
|
||||
when: ansible_pkg_mgr == 'apt'
|
||||
|
||||
- name: ensure docker-ce repository is enabled
|
||||
action: "{{ docker_repo_info.pkg_repo }}"
|
||||
args:
|
||||
apt_repository:
|
||||
repo: "{{ item }}"
|
||||
state: present
|
||||
with_items: "{{ docker_repo_info.repos }}"
|
||||
when: not (ansible_os_family in ["Flatcar Container Linux by Kinvolk", "RedHat", "Suse", "ClearLinux"] or is_ostree) and (docker_repo_info.repos|length > 0)
|
||||
when: ansible_pkg_mgr == 'apt'
|
||||
|
||||
- name: Configure docker repository on Fedora
|
||||
template:
|
||||
|
@ -87,63 +88,49 @@
|
|||
dest: "{{ yum_repo_dir }}/docker-ce.repo"
|
||||
when: ansible_distribution in ["CentOS","RedHat","OracleLinux"] and not is_ostree
|
||||
|
||||
- name: check if container-selinux is available
|
||||
yum:
|
||||
list: "container-selinux"
|
||||
register: yum_result
|
||||
when: ansible_distribution in ["CentOS","RedHat"] and not is_ostree
|
||||
|
||||
- name: Configure extras repository on RedHat/CentOS if container-selinux is not available in current repos
|
||||
yum_repository:
|
||||
name: extras
|
||||
description: "CentOS-{{ ansible_distribution_major_version }} - Extras"
|
||||
state: present
|
||||
baseurl: "{{ extras_rh_repo_base_url }}"
|
||||
file: "extras"
|
||||
gpgcheck: "{{ 'yes' if extras_rh_repo_gpgkey else 'no' }}"
|
||||
gpgkey: "{{ extras_rh_repo_gpgkey }}"
|
||||
keepcache: "{{ docker_rpm_keepcache | default('1') }}"
|
||||
proxy: " {{ http_proxy | default('_none_') }}"
|
||||
when:
|
||||
- ansible_distribution in ["CentOS","RedHat"] and not is_ostree
|
||||
- yum_result.results | length == 0
|
||||
- name: Remove dpkg hold
|
||||
dpkg_selections:
|
||||
name: "{{ item }}"
|
||||
selection: install
|
||||
when: ansible_pkg_mgr == 'apt'
|
||||
changed_when: false
|
||||
with_items:
|
||||
- "{{ containerd_package }}"
|
||||
- docker-ce
|
||||
- docker-ce-cli
|
||||
|
||||
- name: ensure docker packages are installed
|
||||
action: "{{ docker_package_info.pkg_mgr }}"
|
||||
args:
|
||||
pkg: "{{ item.name }}"
|
||||
force: "{{ item.force|default(omit) }}"
|
||||
state: "{{ item.state | default('present') }}"
|
||||
update_cache: "{{ omit if ansible_distribution == 'Fedora' else True }}"
|
||||
enablerepo: "{{ item.repo | default(omit) }}"
|
||||
register: docker_task_result
|
||||
until: docker_task_result is succeeded
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | d(3) }}"
|
||||
with_items: "{{ docker_package_info.pkgs }}"
|
||||
notify: restart docker
|
||||
when: not (ansible_os_family in ["Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_ostree) and (docker_package_info.pkgs|length > 0)
|
||||
|
||||
- name: Ensure docker packages are installed
|
||||
action: "{{ docker_package_info.pkg_mgr }}"
|
||||
args:
|
||||
name: "{{ item.name }}"
|
||||
state: "{{ item.state | default('present') }}"
|
||||
with_items: "{{ docker_package_info.pkgs }}"
|
||||
package:
|
||||
name: "{{ docker_package_info.pkgs }}"
|
||||
state: "{{ docker_package_info.state | default('present') }}"
|
||||
module_defaults:
|
||||
apt:
|
||||
update_cache: true
|
||||
dnf:
|
||||
enablerepo: "{{ docker_package_info.enablerepo | default(omit) }}"
|
||||
yum:
|
||||
enablerepo: "{{ docker_package_info.enablerepo | default(omit) }}"
|
||||
zypper:
|
||||
update_cache: true
|
||||
register: docker_task_result
|
||||
until: docker_task_result is succeeded
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | d(3) }}"
|
||||
notify: restart docker
|
||||
when: ansible_os_family in ["ClearLinux"]
|
||||
when:
|
||||
- not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]
|
||||
- not is_ostree
|
||||
- docker_package_info.pkgs|length > 0
|
||||
|
||||
# This is required to ensure any apt upgrade will not break kubernetes
|
||||
- name: Tell Debian hosts not to change the docker version with apt upgrade
|
||||
dpkg_selections:
|
||||
name: "{{ item }}"
|
||||
selection: hold
|
||||
when: ansible_os_family in ["Debian"]
|
||||
when: ansible_pkg_mgr == 'apt'
|
||||
changed_when: false
|
||||
with_items:
|
||||
- "{{ containerd_package }}"
|
||||
- docker-ce
|
||||
- docker-ce-cli
|
||||
|
||||
|
|
|
@ -9,6 +9,5 @@ docker_versioned_pkg:
|
|||
docker_version: "latest"
|
||||
|
||||
docker_package_info:
|
||||
pkg_mgr: yum
|
||||
pkgs:
|
||||
- name: "{{ docker_versioned_pkg[docker_version | string] }}"
|
||||
- "{{ docker_versioned_pkg[docker_version | string] }}"
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
---
|
||||
docker_package_info:
|
||||
pkg_mgr: swupd
|
||||
pkgs:
|
||||
- name: "containers-basic"
|
||||
- "containers-basic"
|
||||
|
|
|
@ -15,23 +15,17 @@ docker_cli_versioned_pkg:
|
|||
'20.10': docker-ce-cli=5:20.10.2~3-0~debian-{{ ansible_distribution_release|lower }}
|
||||
|
||||
docker_package_info:
|
||||
pkg_mgr: apt
|
||||
pkgs:
|
||||
- name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
force: yes
|
||||
- name: "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}"
|
||||
force: yes
|
||||
- name: "{{ docker_versioned_pkg[docker_version | string] }}"
|
||||
force: yes
|
||||
- "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
- "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}"
|
||||
- "{{ docker_versioned_pkg[docker_version | string] }}"
|
||||
|
||||
docker_repo_key_info:
|
||||
pkg_key: apt_key
|
||||
url: '{{ docker_debian_repo_gpgkey }}'
|
||||
repo_keys:
|
||||
- 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
|
||||
- '{{ docker_debian_repo_repokey }}'
|
||||
|
||||
docker_repo_info:
|
||||
pkg_repo: apt_repository
|
||||
repos:
|
||||
- >
|
||||
deb {{ docker_debian_repo_base_url }}
|
||||
|
|
|
@ -14,8 +14,8 @@ docker_cli_versioned_pkg:
|
|||
'20.10': docker-ce-cli-20.10.2-3.fc{{ ansible_distribution_major_version }}
|
||||
|
||||
docker_package_info:
|
||||
pkg_mgr: dnf
|
||||
enablerepo: "docker-ce"
|
||||
pkgs:
|
||||
- name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
- name: "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}"
|
||||
- name: "{{ docker_versioned_pkg[docker_version | string] }}"
|
||||
- "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
- "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}"
|
||||
- "{{ docker_versioned_pkg[docker_version | string] }}"
|
||||
|
|
|
@ -17,19 +17,8 @@ docker_cli_versioned_pkg:
|
|||
'20.10': docker-ce-cli-20.10.2-3.el{{ ansible_distribution_major_version }}
|
||||
|
||||
docker_package_info:
|
||||
pkg_mgr: yum
|
||||
enablerepo: "docker-ce"
|
||||
pkgs:
|
||||
- name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
repo: "docker-ce"
|
||||
- name: "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}"
|
||||
repo: "docker-ce"
|
||||
- name: "{{ docker_versioned_pkg[docker_version | string] }}"
|
||||
repo: "docker-ce"
|
||||
|
||||
docker_repo_key_info:
|
||||
pkg_key: ''
|
||||
repo_keys: []
|
||||
|
||||
docker_repo_info:
|
||||
pkg_repo: ''
|
||||
repos: []
|
||||
- "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
- "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}"
|
||||
- "{{ docker_versioned_pkg[docker_version | string] }}"
|
||||
|
|
|
@ -1,15 +1,6 @@
|
|||
---
|
||||
docker_package_info:
|
||||
pkg_mgr: zypper
|
||||
state: latest
|
||||
pkgs:
|
||||
- name: docker
|
||||
- name: containerd
|
||||
state: latest
|
||||
|
||||
docker_repo_key_info:
|
||||
pkg_key: ''
|
||||
repo_keys: []
|
||||
|
||||
docker_repo_info:
|
||||
pkg_repo: ''
|
||||
repos: []
|
||||
- docker
|
||||
- containerd
|
||||
|
|
|
@ -15,23 +15,17 @@ docker_cli_versioned_pkg:
|
|||
'20.10': docker-ce-cli=5:20.10.2~3-0~ubuntu-{{ ansible_distribution_release|lower }}
|
||||
|
||||
docker_package_info:
|
||||
pkg_mgr: apt
|
||||
pkgs:
|
||||
- name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
force: yes
|
||||
- name: "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}"
|
||||
force: yes
|
||||
- name: "{{ docker_versioned_pkg[docker_version | string] }}"
|
||||
force: yes
|
||||
- "{{ containerd_versioned_pkg[containerd_version | string] }}"
|
||||
- "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}"
|
||||
- "{{ docker_versioned_pkg[docker_version | string] }}"
|
||||
|
||||
docker_repo_key_info:
|
||||
pkg_key: apt_key
|
||||
url: '{{ docker_ubuntu_repo_gpgkey }}'
|
||||
repo_keys:
|
||||
- 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
|
||||
- '{{ docker_ubuntu_repo_repokey }}'
|
||||
|
||||
docker_repo_info:
|
||||
pkg_repo: apt_repository
|
||||
repos:
|
||||
- >
|
||||
deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }}
|
||||
|
|
|
@ -64,7 +64,7 @@ quay_image_repo: "quay.io"
|
|||
|
||||
# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
|
||||
# after migration to container download
|
||||
calico_version: "v3.16.5"
|
||||
calico_version: "v3.16.9"
|
||||
calico_ctl_version: "{{ calico_version }}"
|
||||
calico_cni_version: "{{ calico_version }}"
|
||||
calico_policy_version: "{{ calico_version }}"
|
||||
|
@ -77,13 +77,13 @@ cni_version: "v0.9.0"
|
|||
|
||||
weave_version: 2.7.0
|
||||
pod_infra_version: "3.3"
|
||||
cilium_version: "v1.8.6"
|
||||
kube_ovn_version: "v1.5.2"
|
||||
cilium_version: "v1.8.8"
|
||||
kube_ovn_version: "v1.6.1"
|
||||
kube_router_version: "v1.1.1"
|
||||
multus_version: "v3.6"
|
||||
multus_version: "v3.7"
|
||||
ovn4nfv_ovn_image_version: "v1.0.0"
|
||||
ovn4nfv_k8s_plugin_image_version: "v1.1.0"
|
||||
helm_version: "v3.3.4"
|
||||
helm_version: "v3.5.3"
|
||||
|
||||
# Get kubernetes major version (i.e. 1.17.4 => 1.17)
|
||||
kube_major_version: "{{ kube_version | regex_replace('^v([0-9])+\\.([0-9]+)\\.[0-9]+', 'v\\1.\\2') }}"
|
||||
|
@ -99,6 +99,7 @@ kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release
|
|||
etcd_download_url: "https://github.com/coreos/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
|
||||
cni_download_url: "https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
|
||||
calicoctl_download_url: "https://github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
|
||||
calico_crds_download_url: "https://github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz"
|
||||
crictl_download_url: "https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
|
||||
helm_download_url: "https://get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"
|
||||
|
||||
|
@ -117,6 +118,14 @@ crictl_checksums:
|
|||
# Kubernetes versions above Kubespray's current target version are untested and should be used with caution.
|
||||
kubelet_checksums:
|
||||
arm:
|
||||
v1.20.5: cd04519bc8271a990380b62bd5b397ed5e21f9d81d731792b53453baea3d7cd5
|
||||
v1.20.4: 6c2dbd275d0413124bb56f347821aa11003749c3e5d31ebddec64b14dc74ea18
|
||||
v1.20.3: 64a2f413987483e131a6cbbf641c3f8a4d506d5373c04d0765ccf96b4886597d
|
||||
v1.20.2: fa4e9891c25830cd2b5ed52b524878d04554094a0578e039c2b3c28ea85a5b12
|
||||
v1.20.1: 576d89284c8fa758d7d34d789444e9a5a03f24b6400cc94bf103c1c796898dc7
|
||||
v1.20.0: 132ea2f821ec1537af34b3a1414071416149972ce93619f71730c8dd741e357c
|
||||
v1.19.9: 0c2c22f226189764b7f4a73614e12ff0be150640f64b691327e925db125d8950
|
||||
v1.19.8: 0a68c72574a96e7ca899610084659f156c1eeb312af7fc59aa549d47411d1dab
|
||||
v1.19.7: 59284dcf4ee3f62475d0d6d1070c64e0e5362280e8d70884513e1e1cee918cb0
|
||||
v1.19.6: 59179a52615cb0811909558425e046cc9c5c051e3b55733b96e40a9aed0967de
|
||||
v1.19.5: 112e247538b889057dd55a35350415494785c6e0f5ad54a273fcab51f4c30805
|
||||
|
@ -125,6 +134,8 @@ kubelet_checksums:
|
|||
v1.19.2: 631e686c34911a40a798817dcff89532c88bb649885f93ec66b339e227ebd974
|
||||
v1.19.1: 3985c8d02c1c2f2016fceccd9cc14865e2d047f32c8f0b42aeedcc8450de572e
|
||||
v1.19.0: bb433ef7981297bdee6ffc4e23376e8db24a0e47321ebe94bf9d4b9f7a2f0e3b
|
||||
v1.18.17: 6b413c325293a8319ec50e76b6043358304a28be17a36c0d16dbb666b93b28cf
|
||||
v1.18.16: e678fbe06c197c1a233ef3010e3e2ee4e6759d14e7247178dbe4c06843aa5c10
|
||||
v1.18.15: 27c8d5ea1f837cb0148d829944d46b028ff9d9b9edf12cd39e1cb5f65fd9f41c
|
||||
v1.18.14: e380c14330c4f34b7682a19a493769a4ba7cf9132a5ae9d2ab4109a0ba60973b
|
||||
    v1.18.13: ed7e1adef473e4c49cbd4b8f2363239ff08b72f9a2c153f29c4021ef04ca9526
@@ -141,6 +152,14 @@ kubelet_checksums:
    v1.18.1: 04d8e0a080dcb23d579c69e769e75bd5abaa1977d43550ec891560d76f1f7f37
    v1.18.0: 985c1a1b492ccc6e46e1cd454790dae539d5b93208efb05e35114f66a183de99
  arm64:
    v1.20.5: 2d1b6f73fd0373687c7dc6582fe5ebddfa26c3b9ef128ec3955517b9db4746f2
    v1.20.4: 66bcdc7521e226e4acaa93c08e5ea7b2f57829e1a5b9decfd2b91d237e216e1d
    v1.20.3: 29fa80790edcdfc7389b4b88cf963ce29d82e5de39bdb70f7c6dff551299b6f3
    v1.20.2: 2b2d8e567439500daa9be88c4b10ee6db7126be942ac94259b506152ee38da1e
    v1.20.1: 359025e549ec6e16ffa1aead7312934233a983a918f54686a2c34ff9a6ba6016
    v1.20.0: 47ab6c4273fc3bb0cb8ec9517271d915890c5a6b0e54b2991e7a8fbbe77b06e4
    v1.19.9: 796f080c53ec50b11152558b4a744432349b800e37b80516bcdc459152766a4f
    v1.19.8: a00146c16266d54f961c40fc67f92c21967596c2d730fa3dc95868d4efb44559
    v1.19.7: 473016cf1165ab5c705bd29c65451984e48deade6ff96bf861c46e15fc52ba3f
    v1.19.6: e168d57a0a04abe58db13aaf4f54ba691115d0b1a743d549136794bbecde7dc8
    v1.19.5: 646373c5337f6f348573bbbef3d92a675fe18d3c54b752805c6d3a9f9fc22957
@@ -149,6 +168,8 @@ kubelet_checksums:
    v1.19.2: 86b9336aa7f6215e6f9b387bb82105657668aa8a38b0e0f7c2e647ef45c1b723
    v1.19.1: 143bed1f04cba4e6749a72abf9258d19f31e1a310f94bd041cd30ce62a1f73ff
    v1.19.0: d8fa5a9739ecc387dfcc55afa91ac6f4b0ccd01f1423c423dbd312d787bbb6bf
    v1.18.17: 16cb849c8b80912e246e0cb3c7014aeb12cd6dfa9280bac717d8d941e43177f3
    v1.18.16: 712fd38b397a02058df7836f34bae3a52b5cca78d7d118be2d9a4d6e1d726f0a
    v1.18.15: c74f46e87aae7b9bb590319632fcb106b5efd998046dd47634c89bbb875d9fb6
    v1.18.14: b4ca12f6b1a5f7c838831eb3c9152ccf0f53b83a3492c812da6ac680959a4801
    v1.18.13: 953b7f9c70d04be0cf69745ab30f676375d93272362c97bb3cd527d6e27b38e4
@@ -165,6 +186,14 @@ kubelet_checksums:
    v1.18.1: 2181cde9e6b24055d262b78758b365363273896968df673eb13d4f17a4f69c4a
    v1.18.0: db91a26f8baa2bce017172305e717e77be5cfc4272592be8cb0155e1cfa7719e
  amd64:
    v1.20.5: 8a07891153398585a749285bb3caa2e16f0802d03ffdd5054a7ef1ef32989bea
    v1.20.4: a9f28ac492b3cbf75dee284576b2e1681e67170cd36f3f5cdc31495f1bdbf809
    v1.20.3: 8efddc4a7cc63c18f1d14bcb0879b460bbbc7358304ca10dff50907c03f04c81
    v1.20.2: 2447da8e0729ac6e4bdcb1ea7d41c8ae6e5c8565edea189434286697ccdc8d85
    v1.20.1: 2970974fa56ee90b76c7f3f8b0075f0719bb9d645aacfcef85238b68972aa9c3
    v1.20.0: ff2422571c4c1e9696e367f5f25466b96fb6e501f28aed29f414b1524a52dea0
    v1.19.9: 296e72c395f030209e712167fc5f6d2fdfe3530ca4c01bcd9bfb8c5e727c3d8d
    v1.19.8: f5cad5260c29584dd370ec13e525c945866957b1aaa719f1b871c31dc30bcb3f
    v1.19.7: d8b296825f6dd7a17287b73cd6604d32210abbba86c88fb68c1b1c5016906c54
    v1.19.6: 8162fa58f7fbb9b1f07b2b666c9759ad5c30950bc9f6f2084de1c0d9bc64a4c0
    v1.19.5: b64dbc4a1a47d21be0a67f7d0050eebc02dd610c31175865da08e3a59e0834af
@@ -173,6 +202,8 @@ kubelet_checksums:
    v1.19.2: 7ff6d3663e8de0c654909e7a279e386286aa7ed3fc262d021bed77c92d62780f
    v1.19.1: 2ca2a3104d4cce26db128e3a0b7a042385df4f2c51bdbe740e067fdfaa2fcdd1
    v1.19.0: 3f03e5c160a8b658d30b34824a1c00abadbac96e62c4d01bf5c9271a2debc3ab
    v1.18.17: 56348ecef546e0a301dc36193b0c8b13d3b4bea3115e167f16a5c75ffc5e11bd
    v1.18.16: b6e97539ef91523f1238d6bdcddc97dc0353396519a60c1a912bfabd9356a67b
    v1.18.15: 2d079bc9f7ac2c2d1f86df842df55f2ec8e74e01edc347994ccf1a054b9e6077
    v1.18.14: 1876c3aad83beeea1bc949fe6121d0d0d9002a0e026c15ccb568d8c5e748fba6
    v1.18.13: dbdecaec19da684055242e2684ec8a2bff1674adf9ae8b8ed87f9cb46101a87f
@@ -190,6 +221,14 @@ kubelet_checksums:
    v1.18.0: 3a90e7abf9910aebf9ef5845918c665afd4136a8832604ccfabca2defb35ce0f
kubectl_checksums:
  arm:
    v1.20.5: b0b0e008dde7af257f97668620dc7b265f113833930ee03af190e7f4c4e8e545
    v1.20.4: 459bfa4f35beb3496a2685f883c105a43321288619533ec7684dce922b926f60
    v1.20.3: 72ad82d16b64dbb8c4f8eb61d710d062fe0f1ce7da94e457780948ad2e3ea9dc
    v1.20.2: a8d5b7e974200ae94a0eb3873773ec4ceffa99283f1843960d0a1b4448c2aa42
    v1.20.1: f164e6efd49a6c4a278d1317f00ee1691e7cf8531a49d10ea069f5180d3540ed
    v1.20.0: bec2b30e37056ae354ec5a86d77634a9b832ade8d9aeea7a4514cee21cb7821e
    v1.19.9: 697e0c4ce9a5511705abe252c717f7d84be9eadb3dbb905bd624d31870f62f45
    v1.19.8: 4d3cba1a8005eabbf939577253cdf593be575d0e73ac47acc5090049d8a96781
    v1.19.7: 11d49f87bf8b557066c339eea4775b32dd463fc61ce24b24744f82cb412e9277
    v1.19.6: fde28a2bff5892e461d5c72c5391da5eef20450a5a074bbbfce4f476687dac95
    v1.19.5: 10409729115f66e32263dfa8a320b74ef1e5845a9103470d3d18095ca4c1dc80
@@ -198,6 +237,8 @@ kubectl_checksums:
    v1.19.2: c8cad74a586dfee41436ce866c91d79c9d2053468eccde9fed976cdf32b7849f
    v1.19.1: e63bbf3161c49d60e46ffaf7d3cfd689834516205be1be881d2e652115535e93
    v1.19.0: 5885bb723a55ab95e8687e0ad52620ce4c08f76fd20c0973f5cd19c753b513c8
    v1.18.17: e153e7d980bc3af71ccede680d254457380fc2ae2044f0d67eb8685bcf519909
    v1.18.16: a6eb328325fd2b2f3c254e6a97407df265caca39292a05993b5ef997bc2a55f3
    v1.18.15: 952530dd6b272eed932e49a29625e20303b9621ba36c1cc0394476d483f6860a
    v1.18.14: b2b88ee6bea8ee17dd1c7687add53c9db5139abb7013ded77050d57b62070aa7
    v1.18.13: f3e9a4786a774441ee448edb6f842f9c6825f12245b5e4ee5ffe8b2ab1c85058
@@ -214,6 +255,14 @@ kubectl_checksums:
    v1.18.1: 896c90b1b9d88e121876d93718591f3ecbab880b304767806c6c9fcb3b145805
    v1.18.0: 34fc6d16c2f535ed381b5fd3a4d40b642fff4f9ff95f8250b8043a29b8c062b9
  arm64:
    v1.20.5: bafb8af51eaacea782bf6c3574c2a531aaf14637980bfa09c43ab5023773f70c
    v1.20.4: 0fd64b3e5d3fda4637c174a5aea0119b46d6cbede591a4dc9130a81481fc952f
    v1.20.3: 5bd714a08410dbe7c69e61f0209973ccdffe6fe93de5a12d1707f9a40abec60d
    v1.20.2: 37fdba9fcd43cafba11ac4f82692e41aca41b59f44fd968fd84c263d71af580f
    v1.20.1: 1ed8762306d4d09574150ffc666a04e5a79ca08e53570cd34977fab4fc7c4611
    v1.20.0: 25e4465870c99167e6c466623ed8f05a1d20fbcb48cab6688109389b52d87623
    v1.19.9: 628627d01c9eaf624ffe3cf1195947a256ea5f842851e42682057e4233a9e283
    v1.19.8: 8f037ab2aa798bbc66ebd1d52653f607f223b07813bcf98d9c1d0c0e136910ec
    v1.19.7: a0f58222e8d62b86b1148746531fa0c3b91fa208586cb068883550fc3dcd498b
    v1.19.6: 828ddf7183c647306a2432ff098a5b22fd6c68c069cb7047ffb1e2b2a967ef83
    v1.19.5: eaf26ca30f1696e72754d86aeda1886287127295809a400821f8d0ffa6329359
@@ -222,6 +271,8 @@ kubectl_checksums:
    v1.19.2: a460f918c03e5cd916f4ac28da137596232e344cc0755d4ceb222fc4cd203e09
    v1.19.1: 332bbdb4560f9b7fcbb96c8f2cebbc4996e409384ca07510e5c5990998158c20
    v1.19.0: d4adf1b6b97252025cb2f7febf55daa3f42dc305822e3da133f77fd33071ec2f
    v1.18.17: 40fb9151c0a91138973d7c21e1d615bc32217d11913d9c3a9cc83b9e7d199a2a
    v1.18.16: c6dbc057558cbb202eaa37cf5a414e07dd01f95ddeec8d3789ad53c7cfcfece9
    v1.18.15: 6b4a63df325cdb523f16ffd8799745a8fdf979ef89e228c309a715671b6aa984
    v1.18.14: ac4014f7d9001375cb600a15d77e90eb6a20053afac82f167f4b7732aa073388
    v1.18.13: 8e5271e95442e373df1c67473484e387086f344a2e1445ee9f5a878ca7f4442c
@@ -238,6 +289,14 @@ kubectl_checksums:
    v1.18.1: 39e9645c6bed1e7340b3d764db983b9fc35326b11325fd509c3660c9f55469ed
    v1.18.0: 0de307f90502cd58e5785cdcbebeb552df81fa2399190f8a662afea9e30bc74d
  amd64:
    v1.20.5: 7f9dbb80190945a5077dc5f4230202c22f68f9bd7f20c213c3cf5a74abf55e56
    v1.20.4: 98e8aea149b00f653beeb53d4bd27edda9e73b48fed156c4a0aa1dabe4b1794c
    v1.20.3: 9124ace617387fdb78b95a9b2551d1b447bc8007caea68359743895784262fc8
    v1.20.2: 2583b1c9fbfc5443a722fb04cf0cc83df18e45880a2cf1f6b52d9f595c5beb88
    v1.20.1: 3f4b52a8072013e4cd34c9ea07e3c0c4e0350b227e00507fb1ae44a9adbf6785
    v1.20.0: a5895007f331f08d2e082eb12458764949559f30bcc5beae26c38f3e2724262c
    v1.19.9: 7128c9e38ab9c445a3b02d3d0b3f0f15fe7fbca56fd87b84e575d7b29e999ad9
    v1.19.8: a0737d3a15ca177816b6fb1fd59bdd5a3751bfdc66de4e08dffddba84e38bf3f
    v1.19.7: d46eb3bbe2575e5b6bedbc6d3519424b4f2f57929d7da1ef7e11c09068f37297
    v1.19.6: d8a46600bcdcd01f39c11e609e8277975f04c0593f79b2a7b5c67646e1c792d8
    v1.19.5: 5f5a0bebde8a8782b2d9de9bd314dce722f9f960bee090b121ea5ac38cf4377d
@@ -246,6 +305,8 @@ kubectl_checksums:
    v1.19.2: f51adfe7968ee173dbfb3dabfc10dc774983cbf8a3a7c1c75a1423b91fda6821
    v1.19.1: da4de99d4e713ba0c0a5ef6efe1806fb09c41937968ad9da5c5f74b79b3b38f5
    v1.19.0: 79bb0d2f05487ff533999a639c075043c70a0a1ba25c1629eb1eef6ebe3ba70f
    v1.18.17: db58bb46e29caecd98090e827a3f5075f01f166afb6da9463cc7d8a47787cb78
    v1.18.16: 3e8cd55186ffe461f383af06cbbab2bfe5dd75def1d699523c53ee8b11b8c91a
    v1.18.15: eb5a5dd0a72795942ab81d1e4331625e80a90002c8bb39b2cb15aa707a3812c6
    v1.18.14: 8c924c1fdf743c2a3bf0edbd4333f54c1bce64871abc1a729243321d99b567d4
    v1.18.13: 8914a4529aaa5f358c663c03bc2cb741e5667f8142e37435689a851647b7697f
@@ -263,6 +324,14 @@ kubectl_checksums:
    v1.18.0: bb16739fcad964c197752200ff89d89aad7b118cb1de5725dc53fe924c40e3f7
kubeadm_checksums:
  arm:
    v1.20.5: e8f9be7ffa4eac3bd8dc32229025a17da5b4fbd527b37272b890920377406db4
    v1.20.4: 53759f7b62e465adc8bddcc11b23b87ceb263d83a965669dc4fc62bd6fdd228c
    v1.20.3: 35c78f4a86f9c3a791b3ecaf669734f790df1391e6bf310c3b2c51c3b97be05c
    v1.20.2: 83bcc176672ad55525ae87111f502d3d0cfec5bebb805634191063387298811c
    v1.20.1: 0e63af77d148e64cd1710f98d56967d5490df6a978658f6d5e2cd6b9cbf3c61a
    v1.20.0: 38829599cfcab3274e9cf1aff942db06d7ec091510399722159fa3fa49a6fef0
    v1.19.9: b60a386bbfbf2feb74cb94a62b32d47b2a85086deebc3127d62126c170077f5b
    v1.19.8: 6761f93e6983c4d7b924209a50a8664a2a78ac5dbb8c33e8e3ba898f11d49773
    v1.19.7: 48722b7c93d18a13e734200de202912a324769139d0d434ff1dfff82af164814
    v1.19.6: e3ecde2f2eccb177c0e4b8c6bd19ae471bc19977a8f812cb17094743d42b5b6e
    v1.19.5: b034594ebe9096f27b3bc5a0b1a98baed9d54ba1457d3d13208f0d490db39000
@@ -271,6 +340,8 @@ kubeadm_checksums:
    v1.19.2: effc35d1e3ab01ac80185ff9f7ca5afabbb94f5f91d7326b04b09e903315647d
    v1.19.1: 0e910cf9c771976f6eb079098ad428f3e99080624f478e3d71b670005a7c3651
    v1.19.0: 62fca8b373f8c63409bcfcb7c585f8de882a8a119d88d39666e1ab3a11be188b
    v1.18.17: 975392437bf9e427190007af37dd362f8d4413d296fd9b469bda2a6eace75fd1
    v1.18.16: 277ce77d09d5f25c3fb9faa67fda6274999e4bd1deeae38186aae4d5a856f38a
    v1.18.15: b242890123a5ecc574157a959475c903eeb14ed17b4578902a17b8d994d4c401
    v1.18.14: 7e5d4beedcaf13c0076f03c9232464946faa061effd5db8c7705d317a4ee6e95
    v1.18.13: 3785825c1b9a1fbb90abc00077d9ccd43610b147302c29501b9ce12959fb13bf
@@ -287,6 +358,14 @@ kubeadm_checksums:
    v1.18.1: 4f919ad7215209dee97ea4c61668e44a2cce8f575b9cf4032e47f0c377924854
    v1.18.0: 0f05bd526bb38be11459675c69bc882a2d3e583e48339fab49b620d292c2433e
  arm64:
    v1.20.5: d3d587bb1db1411b662d4ede0305d39725a68b8453423e76b2195fa01c4f2a37
    v1.20.4: c3ff7f944826889a23a002c85e8f9f9d9a8bc95e9083fbdda59831e3e34245a7
    v1.20.3: d4699a79e99c4603adb13f87825ce1109fffefd803966f8187baeb8e25cd0060
    v1.20.2: 50f16984e4fc5681ba1fa7fb8a19fc1ca72e79ff5c16e97f5b500228efcf9a75
    v1.20.1: 91eb63944e9a033bb40dfc638913ca76e07d6877a24d25636bf833459ae90010
    v1.20.0: 16faf8bf3c94cb21dcae131cbfbd0961fc5fef37cb7e37f872ff249e235e0f46
    v1.19.9: 403c767bef0d681aebc45d5643787fc8c0b9344866cbd339368637a05ea1d11c
    v1.19.8: dfb838ffb88d79e4d881326f611ae5e5999accb54cdd666c75664da264b5d58e
    v1.19.7: 43615e65092de34dcaf34725d8adc0af91b660fa54a885421fdb29f743478911
    v1.19.6: 082ceac5f542cb49a62cf47212bf1ea9dbb15d1831126f526d15e429b4f0687d
    v1.19.5: 8c4363e910f0070d597f3e5b9a7b8741e1ef20778a8dddc15ac47bf02389d964
@@ -295,6 +374,8 @@ kubeadm_checksums:
    v1.19.2: b6900a44558ba1a0a364406e2072163f5fc561030da97045e1403cdc69ff2682
    v1.19.1: dcdabd2fdec9b4dd8febd1625e1fbbe15362919041e5f4ce3aab629e4aea7540
    v1.19.0: db1c432646e6e6484989b6f7191f3610996ac593409f12574290bfc008ea11f5
    v1.18.17: 39e6a732aa060fecd3e6b23c30ec325b5afd963466e857d8b2e65b7c729f450b
    v1.18.16: fbb945fff686eb2683156dee16fe2bf029e6afe575f68a54da2f81f5192dbff7
    v1.18.15: d6d4d9b8e4992c218ff6185da7928289c9938796d5c08a7427625a563c74a075
    v1.18.14: e4c1aaed946dd38ca6d9fdef0ef333b4d488a25810aa6f8a98de1b9dd6d47db3
    v1.18.13: 54d035ea9356b2f7f146d7e287aba128d3a0c594e0f0112f6ccba1d0294770c9
@@ -311,6 +392,14 @@ kubeadm_checksums:
    v1.18.1: 0cb6589d9b4c09b007eae977ab8a185fc4140eda886408dced4f500a508e4e83
    v1.18.0: 2ef1785159c80a9acd454a1c8be3c6b8db2260200b22e4359426e709ff786d01
  amd64:
    v1.20.5: eecb68547f0f99951b0ed910dcb0cfa32766587cc79a53960766164a56d6aed0
    v1.20.4: dcc5629da2c31a000b9b50db077b1cd51a6840e08233fd64b67e37f3f098c392
    v1.20.3: 768896db3163d9926f1a0dcf6a7dd9eeeca64a3e162758425ec43e524ada4a5a
    v1.20.2: e0fce64f3afd3a84bce0996ccdb483812607a91f03e726231f2aaeb622bb9519
    v1.20.1: c5abaf7db925d1303866c8da6cb20ac2d4404588bc503805ef43c3cebf5ce7b9
    v1.20.0: b0ef92da9901670437af869e88c2799512bcb858b886207a081882795567e807
    v1.19.9: 917712bbd38b625aca456ffa78bf134d64f0efb186cc5772c9844ba6d74fd920
    v1.19.8: 9c6646cdf03efc3194afc178647205195da4a43f58d0b70954953f566fa15c76
    v1.19.7: c63ef1842533cd7888c7452cab9f320dcf45fc1c173e9d40abb712d45992db24
    v1.19.6: 6204d9f16554480fe64a09e9efef31559f1da623fb34492a9a18b085afac876a
    v1.19.5: 00a2eb2b919661b46cd845c5645e3258806f331f1ea8015e136e733c8c5dd515
@@ -319,6 +408,8 @@ kubeadm_checksums:
    v1.19.2: 377dbf06469709aafb7a9a6c925e890bf48727b905455c2a81fee28727716f2f
    v1.19.1: d5afcf4ff916b4c6810f10f76176c73238f0854b7748b2cde8afbd91de65a3c9
    v1.19.0: 88ce7dc5302d8847f6e679aab9e4fa642a819e8a33d70731fb7bc8e110d8659f
    v1.18.17: e09614e930e47d9eee9324e826984af447ffc9a95f4f79a14633980a8cb5b691
    v1.18.16: 775d6f59cb8d6c16cb66f48087a4b8a31a8d3b84de4653b7d0cc061195306345
    v1.18.15: 8a5be9e04343e0ac10320455b32a78e5ffc60f450c5c0a11914edeb86ca178d7
    v1.18.14: d6143cd822218daa5faf583c9b8b862c609e66052232e3d3d23c72957fdae341
    v1.18.13: cad917c516bc326ecc17c50bf81b1c0bb153a5090206ccf88d48c23839941d08
@@ -347,25 +438,25 @@ cni_binary_checksums:
  amd64: 58a58d389895ba9f9bbd3ef330f186c0bb7484136d0bfb9b50152eed55d9ec24
calicoctl_binary_checksums:
  arm:
    v3.16.5: 0
    v3.16.4: 0
    v3.16.2: 0
    v3.16.9: 0
    v3.15.2: 0
  amd64:
    v3.16.5: d4175559ad0cf69a1352be3d824ae0a794305d6cd5b17ea0ffc6a153b21d2ae7
    v3.16.4: a502749420e7424090a73644a8bdc1270c6ef679f8f1d33f753d48d60c930377
    v3.16.2: 801b059a4fd0dac8795693026c69a79a00dd2353eff597cc36b79fcb6ec53a0a
    v3.16.9: 3858748e3446f24f6176ef4125fa58bb6f934f6e838cf92f60eafee6aa8117e5
    v3.15.2: 219ae954501cbe15daeda0ad52e13ec65f99c77548c7d3cbfc4ced5c7149fdf1
  arm64:
    v3.16.5: 230579d8761e1cee5ffd447af5a1b92af76ed89f6b7e51771d590017aca5cbb9
    v3.16.4: 003f16a501e876af0da67324b7fed6ca72f5547c15bbe3b798a8eeb707056d43
    v3.16.2: aa5695940ec8a36393725a5ce7b156f776fed8da38b994c0828d7f3a60e59bc6
    v3.16.9: beac9a1ac66fbed05000901aa883443397f05e0f6ffe10ceeccf89f427edc8ad
    v3.15.2: 49165f9e4ad55402248b578310fcf68a57363f54e66be04ac24be9714899b4d5
calico_crds_archive_checksums:
  v3.16.9: cc33648e3fffd973c8780f49589b1ecf57487f8693cfd12a335dcd2a606784c0
  v3.15.2: 82e7122ec04a89c89861b8377c39ae357c7cdbbf60b5f0f1b8fc18ba6bda7dc2

helm_archive_checksums:
  arm: 9da6cc39a796f85b6c4e6d48fd8e4888f1003bfb7a193bb6c427cdd752ad40bb
  amd64: b664632683c36446deeb85c406871590d879491e3de18978b426769e43a1e82c
  arm64: bdd00b8ff422171b4be5b649a42e5261394a89d7ea57944005fc34d34d1f8160
  arm:
    v3.5.3: fd9c1e1eaa6d8d2c9df6027524e80b8bfde0ea49de5f324845256b3e9cc2edb0
  amd64:
    v3.5.3: 2170a1a644a9e0b863f00c17b761ce33d4323da64fc74562a3a6df2abbf6cd70
  arm64:
    v3.5.3: e1348d94ce4caace43689ee2dfa5f8bcd8687c12053d9c13d79875b65d6b72aa

etcd_binary_checksum: "{{ etcd_binary_checksums[image_arch] }}"
cni_binary_checksum: "{{ cni_binary_checksums[image_arch] }}"

@@ -373,8 +464,9 @@ kubelet_binary_checksum: "{{ kubelet_checksums[image_arch][kube_version] }}"
kubectl_binary_checksum: "{{ kubectl_checksums[image_arch][kube_version] }}"
kubeadm_binary_checksum: "{{ kubeadm_checksums[image_arch][kubeadm_version] }}"
calicoctl_binary_checksum: "{{ calicoctl_binary_checksums[image_arch][calico_ctl_version] }}"
calico_crds_archive_checksum: "{{ calico_crds_archive_checksums[calico_version] }}"
crictl_binary_checksum: "{{ crictl_checksums[image_arch][crictl_version] }}"
helm_archive_checksum: "{{ helm_archive_checksums[image_arch] }}"
helm_archive_checksum: "{{ helm_archive_checksums[image_arch][helm_version] }}"
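The helm checksum map is now keyed by architecture and then by version, so the lookup resolves the hash for the running arch and the pinned helm_version. A minimal sketch of how the two-level lookup lands on an entry from the map above (the concrete values are just illustrations taken from that map, not new data):

  # with image_arch: amd64 and helm_version: v3.5.3
  # "{{ helm_archive_checksums['amd64']['v3.5.3'] }}"
  #   -> 2170a1a644a9e0b863f00c17b761ce33d4323da64fc74562a3a6df2abbf6cd70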

# Containers
# In some cases, we need a way to set --registry-mirror or --insecure-registry for docker,

@@ -390,13 +482,13 @@ etcd_image_tag: "{{ etcd_version }}{%- if image_arch != 'amd64' -%}-{{ image_arc
flannel_image_repo: "{{ quay_image_repo }}/coreos/flannel"
flannel_image_tag: "{{ flannel_version }}"
calico_node_image_repo: "{{ quay_image_repo }}/calico/node"
calico_node_image_tag: "{{ calico_version }}"
calico_node_image_tag: "{{ calico_version }}{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}"
calico_cni_image_repo: "{{ quay_image_repo }}/calico/cni"
calico_cni_image_tag: "{{ calico_cni_version }}"
calico_cni_image_tag: "{{ calico_cni_version }}{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}"
calico_policy_image_repo: "{{ quay_image_repo }}/calico/kube-controllers"
calico_policy_image_tag: "{{ calico_policy_version }}"
calico_policy_image_tag: "{{ calico_policy_version }}{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}"
calico_typha_image_repo: "{{ quay_image_repo }}/calico/typha"
calico_typha_image_tag: "{{ calico_typha_version }}"
calico_typha_image_tag: "{{ calico_typha_version }}{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}"
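A quick sketch of how the new arch-suffixed calico tags render, assuming calico_version is v3.16.9 (the version used elsewhere in this diff); amd64 keeps the bare tag, other arches get a suffix:

  # image_arch == 'amd64'  ->  calico/node:v3.16.9
  # image_arch == 'arm64'  ->  calico/node:v3.16.9-arm64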
pod_infra_image_repo: "{{ kube_image_repo }}/pause"
pod_infra_image_tag: "{{ pod_infra_version }}"
install_socat_image_repo: "{{ docker_image_repo }}/xueshanf/install-socat"

@@ -430,7 +522,7 @@ ovn4nfv_k8s_plugin_image_tag: "{{ ovn4nfv_k8s_plugin_image_version }}"
nginx_image_repo: "{{ docker_image_repo }}/library/nginx"
nginx_image_tag: 1.19
haproxy_image_repo: "{{ docker_image_repo }}/library/haproxy"
haproxy_image_tag: 2.2
haproxy_image_tag: 2.3

# Coredns version should be supported by corefile-migration (or at least work with)
# bundle with kubeadm; if not 'basic' upgrade can sometimes fail

@@ -438,7 +530,7 @@ coredns_version: "1.7.0"
coredns_image_repo: "{{ kube_image_repo }}/coredns"
coredns_image_tag: "{{ coredns_version }}"

nodelocaldns_version: "1.16.0"
nodelocaldns_version: "1.17.1"
nodelocaldns_image_repo: "{{ kube_image_repo }}/dns/k8s-dns-node-cache"
nodelocaldns_image_tag: "{{ nodelocaldns_version }}"

@@ -462,7 +554,7 @@ cephfs_provisioner_image_tag: "v2.1.0-k8s1.11"
rbd_provisioner_image_repo: "{{ quay_image_repo }}/external_storage/rbd-provisioner"
rbd_provisioner_image_tag: "v2.1.1-k8s1.11"
local_path_provisioner_image_repo: "{{ docker_image_repo }}/rancher/local-path-provisioner"
local_path_provisioner_image_tag: "v0.0.17"
local_path_provisioner_image_tag: "v0.0.19"
ingress_nginx_controller_image_repo: "{{ kube_image_repo }}/ingress-nginx/controller"
ingress_nginx_controller_image_tag: "v0.41.2"
ingress_ambassador_image_repo: "{{ quay_image_repo }}/datawire/ambassador-operator"

@@ -732,6 +824,23 @@ downloads:
    groups:
      - k8s-cluster

  calico_crds:
    file: true
    enabled: "{{ kube_network_plugin == 'calico' and calico_datastore == 'kdd' }}"
    version: "{{ calico_version }}"
    dest: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ calico_version }}.tar.gz"
    sha256: "{{ calico_crds_archive_checksum }}"
    url: "{{ calico_crds_download_url }}"
    unarchive: true
    unarchive_extra_opts:
      - "--strip=6"
      - "--wildcards"
      - "*/_includes/charts/calico/crds/kdd/"
    owner: "root"
    mode: "0755"
    groups:
      - kube-master

  weave_kube:
    enabled: "{{ kube_network_plugin == 'weave' }}"
    container: true
@@ -24,6 +24,9 @@
- name: download_container | Determine if image is in cache
  stat:
    path: "{{ image_path_cached }}"
    get_attributes: no
    get_checksum: no
    get_mime: no
  delegate_to: localhost
  connection: local
  delegate_facts: no

@@ -6,5 +6,6 @@
    owner: "{{ download.owner | default(omit) }}"
    mode: "{{ download.mode | default(omit) }}"
    copy: no
    extra_opts: "{{ download.unarchive_extra_opts|default(omit) }}"
  when:
    - download.unarchive | default(false)

@@ -29,6 +29,9 @@
- name: Stat etcd v2 data directory
  stat:
    path: "{{ etcd_data_dir }}/member"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: etcd_data_dir_member

- name: Backup etcd v2 data

@@ -17,6 +17,9 @@
- name: "Check certs | check if a cert already exists on node"
  stat:
    path: "{{ etcd_cert_dir }}/{{ item }}"
    get_attributes: no
    get_checksum: yes
    get_mime: no
  register: etcdcert_node
  with_items:
    - ca.pem
@@ -1,6 +1,6 @@
---
- name: Configure | Check if etcd cluster is healthy
  shell: "set -o pipefail && {{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
  shell: "set -o pipefail && {{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -v 'Error: unhealthy cluster' >/dev/null"
  args:
    executable: /bin/bash
  register: etcd_cluster_is_healthy

@@ -19,7 +19,7 @@
    ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"

- name: Configure | Check if etcd-events cluster is healthy
  shell: "set -o pipefail && {{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
  shell: "set -o pipefail && {{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -v 'Error: unhealthy cluster' >/dev/null"
  args:
    executable: /bin/bash
  register: etcd_events_cluster_is_healthy
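Why `grep -q` was dropped here and in the member checks below: with `set -o pipefail`, `grep -q` exits as soon as it has an answer and closes its end of the pipe, so `etcdctl` can be killed by SIGPIPE and the whole pipeline reports failure even though the cluster is healthy. Redirecting full `grep` output to /dev/null keeps the reader alive until EOF. An illustrative shell sketch of the difference (not part of the diff):

  set -o pipefail
  seq 1 100000 | grep -q 1            # may return non-zero: the writer can hit SIGPIPE
  seq 1 100000 | grep 1 >/dev/null    # reads to EOF; exit status reflects only the match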
@@ -25,7 +25,7 @@
{%- endfor -%}

- name: Join Member | Ensure member is in etcd-events cluster
  shell: "set -o pipefail && {{ bin_dir }}/etcdctl member list | grep -q {{ etcd_events_access_address }}"
  shell: "set -o pipefail && {{ bin_dir }}/etcdctl member list | grep {{ etcd_events_access_address }} >/dev/null"
  args:
    executable: /bin/bash
  register: etcd_events_member_in_cluster

@@ -26,7 +26,7 @@
{%- endfor -%}

- name: Join Member | Ensure member is in etcd cluster
  shell: "set -o pipefail && {{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}"
  shell: "set -o pipefail && {{ bin_dir }}/etcdctl member list | grep {{ etcd_access_address }} >/dev/null"
  args:
    executable: /bin/bash
  register: etcd_member_in_cluster

@@ -9,6 +9,9 @@
- name: Check if etcdctl exist
  stat:
    path: "{{ bin_dir }}/etcdctl"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: stat_etcdctl

- block:

@@ -28,6 +31,9 @@
- name: Check if etcdctl still exist after version check
  stat:
    path: "{{ bin_dir }}/etcdctl"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: stat_etcdctl

- block:
@@ -1,4 +1,13 @@
---
- name: Kubernetes Apps | Register coredns deployment annotation `createdby`
  shell: "{{ bin_dir }}/kubectl get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
  register: createdby_annotation
  changed_when: false
  ignore_errors: true
  when:
    - dns_mode in ['coredns', 'coredns_dual']
    - inventory_hostname == groups['kube-master'][0]

- name: Kubernetes Apps | Delete kubeadm CoreDNS
  kube:
    name: "coredns"

@@ -9,6 +18,7 @@
  when:
    - dns_mode in ['coredns', 'coredns_dual']
    - inventory_hostname == groups['kube-master'][0]
    - createdby_annotation.stdout != 'kubespray'
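In short: the CoreDNS deployment templated later in this diff carries a createdby: 'kubespray' annotation, so the delete task above only removes a CoreDNS deployment installed by kubeadm, never the one kubespray itself manages. A hedged one-liner mirroring the registered check, for manual verification (illustrative, assumes kubectl access):

  kubectl -n kube-system get deploy coredns -o jsonpath='{.spec.template.metadata.annotations.createdby}'
  # prints "kubespray" for a kubespray-templated deployment; empty or an error otherwise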

- name: Kubernetes Apps | Delete kubeadm Kube-DNS service
  kube:

@@ -23,6 +23,7 @@ spec:
        k8s-app: kube-dns{{ coredns_ordinal_suffix }}
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
        createdby: 'kubespray'
    spec:
      priorityClassName: system-cluster-critical
      nodeSelector:

@@ -17,6 +17,9 @@
- name: Check if bash_completion.d folder exists  # noqa 503
  stat:
    path: "/etc/bash_completion.d/"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: stat_result

- name: Get helm completion
@@ -1,7 +1,2 @@
---
- name: "calico upgrade complete"
  command: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
  when:
    - inventory_hostname == groups['kube-master'][0]
    - calico_upgrade_enabled|default(True)
    - calico_upgrade_needed|default(False)
# TODO: Handle Calico etcd -> kdd migration

@@ -1,48 +0,0 @@
---
- name: Rotate Tokens | Get default token name  # noqa 306
  shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets -o custom-columns=name:{.metadata.name} --no-headers | grep -m1 default-token"
  register: default_token
  changed_when: false
  until: default_token.rc == 0
  delay: 4
  retries: 10

- name: Rotate Tokens | Get default token data
  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets {{ default_token.stdout }} -ojson"
  register: default_token_data
  changed_when: false

- name: Rotate Tokens | Test if default certificate is expired
  uri:
    url: https://{{ kube_apiserver_ip }}/api/v1/nodes
    method: GET
    return_content: no
    validate_certs: no
    headers:
      Authorization: "Bearer {{ (default_token_data.stdout|from_json)['data']['token']|b64decode }}"
  register: check_secret
  failed_when: false

- name: Rotate Tokens | Determine if certificate is expired
  set_fact:
    needs_rotation: '{{ check_secret.status not in [200, 403] }}'

# FIXME(mattymo): Exclude built in secrets that were automatically rotated,
# instead of filtering manually
- name: Rotate Tokens | Get all serviceaccount tokens to expire  # noqa 306
  shell: >-
    {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets --all-namespaces
    -o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}'
    | grep kubernetes.io/service-account-token
    | egrep 'default-token|kube-proxy|coredns|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|tiller|local-volume-provisioner'
  register: tokens_to_delete
  when: needs_rotation

- name: Rotate Tokens | Delete expired tokens
  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete secrets -n {{ item.split(' ')[0] }} {{ item.split(' ')[1] }}"
  with_items: "{{ tokens_to_delete.stdout_lines }}"
  when: needs_rotation

- name: Rotate Tokens | Delete pods in system namespace
  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete pods -n kube-system --all --grace-period=0 --force"
  when: needs_rotation
@@ -13,11 +13,17 @@
- name: Check if kubelet.conf exists
  stat:
    path: "{{ kube_config_dir }}/kubelet.conf"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kubelet_conf

- name: Check if kubeadm CA cert is accessible
  stat:
    path: "{{ kube_cert_dir }}/ca.crt"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kubeadm_ca_stat
  delegate_to: "{{ groups['kube-master'][0] }}"
  run_once: true

@@ -46,11 +52,6 @@
    kubeadm_token: "{{ temp_token.stdout }}"
  when: kubeadm_token is not defined

- name: Get the kubeadm version
  command: "{{ bin_dir }}/kubeadm version -o short"
  register: kubeadm_output
  changed_when: false

- name: Set kubeadm api version to v1beta2
  set_fact:
    kubeadmConfig_api_version: v1beta2

@@ -64,7 +65,7 @@

- name: Join to cluster if needed
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH / CentOS conservative path management
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}:/sbin"
  when: not is_kube_master and (not kubelet_conf.stat.exists)
  block:
@@ -194,5 +194,6 @@ secrets_encryption_query: "resources[*].providers[0].{{kube_encryption_algorithm

## Amount of time to retain events. (default 1h0m0s)
event_ttl_duration: "1h0m0s"
## Force regeneration of kubernetes control plane certificates without the need of bumping the cluster version
force_certificate_regeneration: false

## Automatically renew K8S control plane certificates on first Monday of each month
auto_renew_certificates: false

@@ -121,21 +121,3 @@
  until: result.status == 200
  retries: 60
  delay: 1

- name: Master | set secret_changed
  command: /bin/true
  notify:
    - Master | set secret_changed to true
    - Master | Copy new kubeconfig for root user

- name: Master | set secret_changed to true
  set_fact:
    secret_changed: true

- name: Master | Copy new kubeconfig for root user
  copy:
    src: "{{ kube_config_dir }}/admin.conf"
    dest: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
    remote_src: yes
    mode: "0600"
    backup: yes
@@ -2,6 +2,9 @@
- name: Check if secret for encrypting data at rest already exist
  stat:
    path: "{{ kube_cert_dir }}/secrets_encryption.yaml"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: secrets_encryption_file

- name: Slurp secrets_encryption file if it exists

28 roles/kubernetes/master/tasks/kubeadm-backup.yml Normal file
@@ -0,0 +1,28 @@
---
- name: Backup old certs and keys
  copy:
    src: "{{ kube_cert_dir }}/{{ item }}"
    dest: "{{ kube_cert_dir }}/{{ item }}.old"
    mode: preserve
    remote_src: yes
  with_items:
    - apiserver.crt
    - apiserver.key
    - apiserver-kubelet-client.crt
    - apiserver-kubelet-client.key
    - front-proxy-client.crt
    - front-proxy-client.key
  ignore_errors: yes

- name: Backup old confs
  copy:
    src: "{{ kube_config_dir }}/{{ item }}"
    dest: "{{ kube_config_dir }}/{{ item }}.old"
    mode: preserve
    remote_src: yes
  with_items:
    - admin.conf
    - controller-manager.conf
    - kubelet.conf
    - scheduler.conf
  ignore_errors: yes
@@ -1,15 +0,0 @@
---
- name: Backup old certs and keys
  copy:
    src: "{{ kube_cert_dir }}/{{ item.src }}"
    dest: "{{ kube_cert_dir }}/{{ item.dest }}"
    mode: 0640
    remote_src: yes
  with_items:
    - {src: apiserver.crt, dest: apiserver.crt.old}
    - {src: apiserver.key, dest: apiserver.key.old}
    - {src: apiserver-kubelet-client.crt, dest: apiserver-kubelet-client.crt.old}
    - {src: apiserver-kubelet-client.key, dest: apiserver-kubelet-client.key.old}
    - {src: front-proxy-client.crt, dest: front-proxy-client.crt.old}
    - {src: front-proxy-client.key, dest: front-proxy-client.key.old}
  ignore_errors: yes

@@ -1,17 +0,0 @@
---
- name: kubeadm | Retrieve files to purge
  find:
    paths: "{{ kube_cert_dir }}"
    patterns: '*.pem'
  register: files_to_purge_for_kubeadm

- name: kubeadm | Purge old certs
  file:
    path: "{{ item.path }}"
    state: absent
  with_items: "{{ files_to_purge_for_kubeadm.files }}"

- name: kubeadm | Purge old kubeconfig
  file:
    path: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
    state: absent

@@ -1,34 +1,11 @@
---
- name: Test if correct apiserver is set in all kubeconfigs
  shell: >-
    grep -Fq "{{ kube_apiserver_endpoint }}" {{ kube_config_dir }}/admin.conf &&
    grep -Fq "{{ kube_apiserver_endpoint }}" {{ kube_config_dir }}/controller-manager.conf &&
    grep -Fq "{{ kube_apiserver_endpoint }}" {{ kube_config_dir }}/kubelet.conf &&
    grep -Fq "{{ kube_apiserver_endpoint }}" {{ kube_config_dir }}/scheduler.conf
  register: kubeconfig_correct_apiserver
  changed_when: False
  failed_when: False

- name: Create temporary directory
  tempfile:
    state: directory
  register: kubeconfig_temp_dir
  when: kubeconfig_correct_apiserver.rc != 0

- name: Generate new kubeconfigs with correct apiserver
  command: >-
    {{ bin_dir }}/kubeadm init phase kubeconfig all
    --config {{ kube_config_dir }}/kubeadm-config.yaml
    --kubeconfig-dir {{ kubeconfig_temp_dir.path }}
  when: kubeconfig_correct_apiserver.rc != 0

- name: Copy new kubeconfigs to kube config dir
  copy:
    src: "{{ kubeconfig_temp_dir.path }}/{{ item }}"
- name: Update server field in component kubeconfigs
  lineinfile:
    dest: "{{ kube_config_dir }}/{{ item }}"
    mode: 0640
    remote_src: yes
  when: kubeconfig_correct_apiserver.rc != 0
    regexp: '^ server: https'
    line: ' server: {{ kube_apiserver_endpoint }}'
    backup: yes
  with_items:
    - admin.conf
    - controller-manager.conf

@@ -38,9 +15,3 @@
    - "Master | Restart kube-controller-manager"
    - "Master | Restart kube-scheduler"
    - "Master | reload kubelet"

- name: Cleanup temporary directory
  file:
    path: "{{ kubeconfig_temp_dir.path }}"
    state: absent
  when: kubeconfig_correct_apiserver.rc != 0
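The new lineinfile approach rewrites the apiserver address in place in each component kubeconfig instead of regenerating whole files through `kubeadm init phase kubeconfig`. A hedged sketch of the fragment it targets (endpoint value and exact indentation are illustrative; the diff viewer collapsed the leading whitespace in the regexp above):

  clusters:
  - cluster:
      server: https://127.0.0.1:6443   # line matched by the 'server: https' regexp and rewritten to kube_apiserver_endpoint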
@@ -1,21 +0,0 @@
---
- name: Copy old certs to the kubeadm expected path
  copy:
    src: "{{ kube_cert_dir }}/{{ item.src }}"
    dest: "{{ kube_cert_dir }}/{{ item.dest }}"
    mode: 0640
    remote_src: yes
  with_items:
    - {src: apiserver.pem, dest: apiserver.crt}
    - {src: apiserver-key.pem, dest: apiserver.key}
    - {src: ca.pem, dest: ca.crt}
    - {src: ca-key.pem, dest: ca.key}
    - {src: front-proxy-ca.pem, dest: front-proxy-ca.crt}
    - {src: front-proxy-ca-key.pem, dest: front-proxy-ca.key}
    - {src: front-proxy-client.pem, dest: front-proxy-client.crt}
    - {src: front-proxy-client-key.pem, dest: front-proxy-client.key}
    - {src: service-account-key.pem, dest: sa.pub}
    - {src: service-account-key.pem, dest: sa.key}
    - {src: "node-{{ inventory_hostname }}.pem", dest: apiserver-kubelet-client.crt}
    - {src: "node-{{ inventory_hostname }}-key.pem", dest: apiserver-kubelet-client.key}
  register: kubeadm_copy_old_certs

@@ -57,15 +57,12 @@
    {{ bin_dir }}/kubeadm join
    --config {{ kube_config_dir }}/kubeadm-controlplane.yaml
    --ignore-preflight-errors=all
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  register: kubeadm_join_control_plane
  retries: 3
  throttle: 1
  until: kubeadm_join_control_plane is succeeded
  when:
    - inventory_hostname != groups['kube-master']|first
    - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"

- name: Set secret_changed to false to avoid extra token rotation
  set_fact:
    secret_changed: false
@@ -1,15 +1,4 @@
---
- name: kubeadm | Check if old apiserver cert exists on host
  stat:
    path: "{{ kube_cert_dir }}/apiserver.pem"
  register: old_apiserver_cert
  delegate_to: "{{ groups['kube-master'] | first }}"
  run_once: true

- name: kubeadm | Migrate old certs if necessary
  import_tasks: kubeadm-migrate-certs.yml
  when: old_apiserver_cert.stat.exists

- name: Install OIDC certificate
  copy:
    content: "{{ kube_oidc_ca_cert | b64decode }}"

@@ -21,39 +10,18 @@
    - kube_oidc_auth
    - kube_oidc_ca_cert is defined

- name: kubeadm | Check serviceaccount key
  stat:
    path: "{{ kube_cert_dir }}/sa.key"
  register: sa_key_before
  run_once: true

- name: kubeadm | Check if kubeadm has already run
  stat:
    path: "/var/lib/kubelet/config.yaml"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kubeadm_already_run

- name: kubeadm | Delete old admin.conf
  file:
    path: "{{ kube_config_dir }}/admin.conf"
    state: absent
- name: kubeadm | Backup kubeadm certs / kubeconfig
  import_tasks: kubeadm-backup.yml
  when:
    - not kubeadm_already_run.stat.exists

- name: kubeadm | Delete old static pods
  file:
    path: "{{ kube_config_dir }}/manifests/{{ item }}.manifest"
    state: absent
  with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler", "kube-proxy"]
  when:
    - old_apiserver_cert.stat.exists

- name: kubeadm | Forcefully delete old static pods
  shell: "set -o pipefail && docker ps -f name=k8s_{{ item }} -q | xargs --no-run-if-empty docker rm -f"
  args:
    executable: /bin/bash
  with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
  when:
    - old_apiserver_cert.stat.exists
    - kubeadm_already_run.stat.exists

- name: kubeadm | aggregate all SANs
  set_fact:

@@ -102,14 +70,15 @@
    kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}"
  when: loadbalancer_apiserver is defined

- name: kubeadm | set kubeadm version
  import_tasks: kubeadm-version.yml
- name: Set kubeadm api version to v1beta2
  set_fact:
    kubeadmConfig_api_version: v1beta2

- name: kubeadm | Certificate management with kubeadm
  import_tasks: kubeadm-certificate.yml
  when:
    - not upgrade_cluster_setup
    - kubeadm_already_run.stat.exists
- name: kubeadm | Create kubeadm config
  template:
    src: "kubeadm-config.{{ kubeadmConfig_api_version }}.yaml.j2"
    dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
    mode: 0640

- name: kubeadm | Check if apiserver.crt contains all needed SANs
  command: openssl x509 -noout -in "{{ kube_cert_dir }}/apiserver.crt" -check{{ item|ipaddr|ternary('ip','host') }} "{{ item }}"

@@ -130,7 +99,7 @@
  when:
    - inventory_hostname == groups['kube-master']|first
    - kubeadm_already_run.stat.exists
    - apiserver_sans_check.changed or force_certificate_regeneration
    - apiserver_sans_check.changed

- name: kubeadm | regenerate apiserver cert 2/2
  command: >-

@@ -140,7 +109,7 @@
  when:
    - inventory_hostname == groups['kube-master']|first
    - kubeadm_already_run.stat.exists
    - apiserver_sans_check.changed or force_certificate_regeneration
    - apiserver_sans_check.changed

- name: kubeadm | Initialize first master
  command: >-

@@ -208,22 +177,6 @@
    - upgrade_cluster_setup
    - kubeadm_already_run.stat.exists

- name: kubeadm | Check serviceaccount key again
  stat:
    path: "{{ kube_cert_dir }}/sa.key"
  register: sa_key_after
  run_once: true

- name: kubeadm | Set secret_changed if service account key was updated
  command: /bin/true
  notify: Master | set secret_changed
  when: sa_key_before.stat.checksum|default("") != sa_key_after.stat.checksum

- name: kubeadm | cleanup old certs if necessary
  import_tasks: kubeadm-cleanup-old-certs.yml
  when:
    - old_apiserver_cert.stat.exists

# FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
- name: kubeadm | Remove taint for master with node role
  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} node-role.kubernetes.io/master:NoSchedule-"
@@ -3,7 +3,7 @@
  uri:
    url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz"
    validate_certs: false
  when: inventory_hostname == groups['kube-master']|first
  when: inventory_hostname in groups['kube-master']
  register: _result
  retries: 60
  delay: 5
@@ -1,15 +0,0 @@
---
- name: Get the kubeadm version
  command: "{{ bin_dir }}/kubeadm version -o short"
  register: kubeadm_output
  changed_when: false

- name: Set kubeadm api version to v1beta2
  set_fact:
    kubeadmConfig_api_version: v1beta2

- name: kubeadm | Create kubeadm config
  template:
    src: "kubeadm-config.{{ kubeadmConfig_api_version }}.yaml.j2"
    dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
    mode: 0640
@@ -0,0 +1,18 @@
---
- name: Fixup kubelet client cert rotation 1/2
  lineinfile:
    path: "{{ kube_config_dir }}/kubelet.conf"
    regexp: '^ client-certificate-data: '
    line: ' client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem'
    backup: yes
  notify:
    - "Master | reload kubelet"

- name: Fixup kubelet client cert rotation 2/2
  lineinfile:
    path: "{{ kube_config_dir }}/kubelet.conf"
    regexp: '^ client-key-data: '
    line: ' client-key: /var/lib/kubelet/pki/kubelet-client-current.pem'
    backup: yes
  notify:
    - "Master | reload kubelet"
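After both lineinfile edits, kubelet.conf points at the rotating client certificate bundle instead of embedding base64 data, so renewed certs are picked up without rewriting the kubeconfig. A hedged sketch of the resulting user section (user name and indentation are illustrative; the diff viewer collapsed the leading spaces in the regexps above):

  users:
  - name: default-auth
    user:
      client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem
      client-key: /var/lib/kubelet/pki/kubelet-client-current.pem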
@@ -62,3 +62,31 @@

- name: Include kubeadm secondary server apiserver fixes
  include_tasks: kubeadm-fix-apiserver.yml

- name: Include kubelet client cert rotation fixes
  include_tasks: kubelet-fix-client-cert-rotation.yml
  when: kubelet_rotate_certificates

- name: Install script to renew K8S control plane certificates
  template:
    src: k8s-certs-renew.sh.j2
    dest: "{{ bin_dir }}/k8s-certs-renew.sh"
    mode: '755'

- name: Renew K8S control plane certificates monthly 1/2
  template:
    src: "{{ item }}.j2"
    dest: "/etc/systemd/system/{{ item }}"
  with_items:
    - k8s-certs-renew.service
    - k8s-certs-renew.timer
  register: k8s_certs_units
  when: auto_renew_certificates

- name: Renew K8S control plane certificates monthly 2/2
  systemd:
    name: k8s-certs-renew.timer
    enabled: yes
    state: started
    daemon-reload: "{{ k8s_certs_units is changed }}"
  when: auto_renew_certificates
@@ -0,0 +1,6 @@
[Unit]
Description=Renew K8S control plane certificates

[Service]
Type=oneshot
ExecStart={{ bin_dir }}/k8s-certs-renew.sh

23 roles/kubernetes/master/templates/k8s-certs-renew.sh.j2 Normal file
@@ -0,0 +1,23 @@
#!/bin/bash

echo "## Expiration before renewal ##"
{{ bin_dir }}/kubeadm {{ 'alpha ' if kube_version is version('v1.20.0', '<') }}certs check-expiration

echo "## Renewing certificates managed by kubeadm ##"
{{ bin_dir }}/kubeadm {{ 'alpha ' if kube_version is version('v1.20.0', '<') }}certs renew all

echo "## Restarting control plane pods managed by kubeadm ##"
{% if container_manager == "docker" %}
{{ docker_bin_dir }}/docker ps -af 'name=k8s_POD_(kube-apiserver|kube-controller-manager|kube-scheduler|etcd)-*' -q | /usr/bin/xargs {{ docker_bin_dir }}/docker rm -f
{% else %}
{{ bin_dir }}/crictl pods --namespace kube-system --name 'kube-scheduler-*|kube-controller-manager-*|kube-apiserver-*|etcd-*' -q | /usr/bin/xargs {{ bin_dir }}/crictl rmp -f
{% endif %}

echo "## Updating /root/.kube/config ##"
/usr/bin/cp {{ kube_config_dir }}/admin.conf /root/.kube/config

echo "## Waiting for apiserver to be up again ##"
until printf "" 2>>/dev/null >>/dev/tcp/127.0.0.1/6443; do sleep 1; done

echo "## Expiration after renewal ##"
{{ bin_dir }}/kubeadm {{ 'alpha ' if kube_version is version('v1.20.0', '<') }}certs check-expiration
@@ -0,0 +1,9 @@
[Unit]
Description=Timer to renew K8S control plane certificates

[Timer]
# First Monday of each month
OnCalendar=Mon *-*-1..7 03:{{ groups['kube-master'].index(inventory_hostname) }}0:00

[Install]
WantedBy=multi-user.target
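The Jinja expression in OnCalendar staggers the renewal by the host's position in the kube-master group, so the control planes are not restarted at the same minute. A sketch of how it renders (host indexes assumed):

  # first master (index 0):  OnCalendar=Mon *-*-1..7 03:00:00
  # second master (index 1): OnCalendar=Mon *-*-1..7 03:10:00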
@@ -59,12 +59,12 @@ etcd:
quota-backend-bytes: "{{ etcd_quota_backend_bytes }}"
{% endif %}
{% if etcd_log_package_levels is defined %}
log-package_levels: "{{ etcd_log_package_levels }}"
log-package-levels: "{{ etcd_log_package_levels }}"
{% endif %}
{% for key, value in etcd_extra_vars.items() %}
{{ key }}: "{{ value }}"
{% endfor %}
{% if host_architecture != "amd64" -%}
{% if host_architecture != "amd64" %}
etcd-unsupported-arch: {{host_architecture}}
{% endif %}
serverCertSANs:

@@ -6,11 +6,6 @@
    state: directory
  when: dynamic_kubelet_configuration

- name: Get the kubeadm version
  command: "{{ bin_dir }}/kubeadm version -o short"
  register: kubeadm_output
  changed_when: false

- name: Set kubelet api version to v1beta1
  set_fact:
    kubeletConfig_api_version: v1beta1
|
|||
- name: haproxy | Get checksum from config
|
||||
stat:
|
||||
path: "{{ haproxy_config_dir }}/haproxy.cfg"
|
||||
get_attributes: no
|
||||
get_checksum: yes
|
||||
get_mime: no
|
||||
register: haproxy_stat
|
||||
|
||||
- name: haproxy | Write static pod
|
||||
|
|
|
@ -22,6 +22,9 @@
|
|||
- name: nginx-proxy | Get checksum from config
|
||||
stat:
|
||||
path: "{{ nginx_config_dir }}/nginx.conf"
|
||||
get_attributes: no
|
||||
get_checksum: yes
|
||||
get_mime: no
|
||||
register: nginx_stat
|
||||
|
||||
- name: nginx-proxy | Write static pod
|
||||
|
|
|
@ -8,17 +8,19 @@
|
|||
"location": "{{ azure_location }}",
|
||||
"subnetName": "{{ azure_subnet_name }}",
|
||||
"securityGroupName": "{{ azure_security_group_name }}",
|
||||
"securityGroupResourceGroup": "{{ azure_security_group_resource_group | default(azure_vnet_resource_group) }}",
|
||||
"vnetName": "{{ azure_vnet_name }}",
|
||||
"vnetResourceGroup": "{{ azure_vnet_resource_group }}",
|
||||
"routeTableName": "{{ azure_route_table_name }}",
|
||||
"routeTableResourceGroup": "{{ azure_route_table_resource_group | default(azure_vnet_resource_group) }}",
|
||||
"vmType": "{{ azure_vmtype }}",
|
||||
{% if azure_primary_availability_set_name is defined %}
|
||||
"primaryAvailabilitySetName": "{{ azure_primary_availability_set_name }}",
|
||||
{%endif%}
|
||||
"useInstanceMetadata": {{azure_use_instance_metadata }},
|
||||
"useInstanceMetadata": {{azure_use_instance_metadata | lower }},
|
||||
{% if azure_loadbalancer_sku == "standard" %}
|
||||
"excludeMasterFromStandardLB": {{ azure_exclude_master_from_standard_lb }},
|
||||
"disableOutboundSNAT": {{ azure_disable_outbound_snat }},
|
||||
"excludeMasterFromStandardLB": {{ azure_exclude_master_from_standard_lb | lower }},
|
||||
"disableOutboundSNAT": {{ azure_disable_outbound_snat | lower }},
|
||||
{% endif%}
|
||||
"loadBalancerSku": "{{ azure_loadbalancer_sku }}"
|
||||
}
|
||||
|
|
|
@ -22,8 +22,6 @@ spec:
|
|||
requests:
|
||||
cpu: {{ loadbalancer_apiserver_cpu_requests }}
|
||||
memory: {{ loadbalancer_apiserver_memory_requests }}
|
||||
securityContext:
|
||||
privileged: true
|
||||
{% if loadbalancer_apiserver_healthcheck_port is defined -%}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
|
|
|
@ -22,8 +22,6 @@ spec:
|
|||
requests:
|
||||
cpu: {{ loadbalancer_apiserver_cpu_requests }}
|
||||
memory: {{ loadbalancer_apiserver_memory_requests }}
|
||||
securityContext:
|
||||
privileged: true
|
||||
{% if loadbalancer_apiserver_healthcheck_port is defined -%}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
|
|
|
@@ -33,7 +33,6 @@
  service:
    name: NetworkManager.service
    state: restarted
  when: is_fedora_coreos

- name: Preinstall | reload kubelet
  service:

@@ -50,13 +49,21 @@

# FIXME(mattymo): Also restart for kubeadm mode
- name: Preinstall | kube-apiserver configured
  stat: path="{{ kube_manifest_dir }}/kube-apiserver.manifest"
  stat:
    path: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kube_apiserver_set
  when: inventory_hostname in groups['kube-master'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'

# FIXME(mattymo): Also restart for kubeadm mode
- name: Preinstall | kube-controller configured
  stat: path="{{ kube_manifest_dir }}/kube-controller-manager.manifest"
  stat:
    path: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kube_controller_set
  when: inventory_hostname in groups['kube-master'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'

@@ -69,8 +69,7 @@
    that: groups.etcd|length is not divisibleby 2
  when:
    - not ignore_assert_errors
    - groups.get('etcd')
    - inventory_hostname in groups['etcd']
    - inventory_hostname in groups.get('etcd',[])

- name: Stop if memory is too small for masters
  assert:

@@ -274,7 +273,7 @@
    that: etcd_deployment_type in ['host', 'docker']
    msg: "The etcd deployment type, 'etcd_deployment_type', must be host or docker"
  when:
    - inventory_hostname in groups['etcd']
    - inventory_hostname in groups.get('etcd',[])
    - not etcd_kubeadm_enabled

- name: Stop if etcd deployment type is not host when container_manager != docker

@@ -282,7 +281,7 @@
    that: etcd_deployment_type == 'host'
    msg: "The etcd deployment type, 'etcd_deployment_type', must be host when container_manager is not docker"
  when:
    - inventory_hostname in groups['etcd']
    - inventory_hostname in groups.get('etcd',[])
    - not etcd_kubeadm_enabled
    - container_manager != 'docker'
@@ -9,6 +9,9 @@
- name: check if booted with ostree
  stat:
    path: /run/ostree-booted
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: ostree

- name: set is_fedora_coreos

@@ -59,6 +62,9 @@
- name: check if kubelet is configured
  stat:
    path: "{{ kube_config_dir }}/kubelet.env"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kubelet_configured
  changed_when: false

@@ -84,6 +90,9 @@
- name: check if /etc/dhclient.conf exists
  stat:
    path: /etc/dhclient.conf
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: dhclient_stat

- name: target dhclient conf file for /etc/dhclient.conf

@@ -94,6 +103,9 @@
- name: check if /etc/dhcp/dhclient.conf exists
  stat:
    path: /etc/dhcp/dhclient.conf
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: dhcp_dhclient_stat

- name: target dhclient conf file for /etc/dhcp/dhclient.conf

@@ -170,6 +182,9 @@
- name: check /usr readonly
  stat:
    path: "/usr"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: usr

- name: set alternate flexvolume path

@@ -46,6 +46,9 @@
- name: Check if kubernetes kubeadm compat cert dir exists
  stat:
    path: "{{ kube_cert_compat_dir }}"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kube_cert_compat_dir_check
  when:
    - inventory_hostname in groups['k8s-cluster']
@@ -0,0 +1,36 @@
---
- name: NetworkManager | Check if host has NetworkManager
  # noqa 303 Should we use service_facts for this?
  command: systemctl is-active --quiet NetworkManager.service
  register: nm_check
  failed_when: false
  changed_when: false

- name: NetworkManager | Ensure NetworkManager conf.d dir
  file:
    path: "/etc/NetworkManager/conf.d"
    state: directory
    recurse: yes
  when: nm_check.rc == 0

- name: NetworkManager | Prevent NetworkManager from managing Calico interfaces (cali*/tunl*/vxlan.calico)
  copy:
    content: |
      [keyfile]
      unmanaged-devices+=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico
    dest: /etc/NetworkManager/conf.d/calico.conf
  when:
    - nm_check.rc == 0
    - kube_network_plugin == "calico"
  notify: Preinstall | reload NetworkManager

# TODO: add other network_plugin interfaces

- name: NetworkManager | Prevent NetworkManager from managing K8S interfaces (kube-ipvs0/nodelocaldns)
  copy:
    content: |
      [keyfile]
      unmanaged-devices+=interface-name:kube-ipvs0;interface-name:nodelocaldns
    dest: /etc/NetworkManager/conf.d/k8s.conf
  when: nm_check.rc == 0
  notify: Preinstall | reload NetworkManager
@@ -40,7 +40,7 @@
    - bootstrap-os

- name: Install epel-release on RedHat/CentOS
  yum:
  package:
    name: epel-release
    state: present
  when:

@@ -56,10 +56,9 @@
  when: kube_proxy_mode == 'ipvs'

- name: Install packages requirements
  action:
    module: "{{ ansible_pkg_mgr }}"
  package:
    name: "{{ required_pkgs | default([]) | union(common_required_pkgs|default([])) }}"
    state: latest
    state: present
  register: pkgs_task_result
  until: pkgs_task_result is succeeded
  retries: "{{ pkg_install_retries }}"

@@ -69,7 +68,7 @@
    - bootstrap-os

- name: Install ipvsadm for ClearLinux
  swupd:
  package:
    name: ipvsadm
    state: present
  when:
@@ -3,6 +3,9 @@
- name: Confirm selinux deployed
  stat:
    path: /etc/selinux/config
    get_attributes: no
    get_checksum: no
    get_mime: no
  when:
    - ansible_os_family == "RedHat"
    - "'Amazon' not in ansible_distribution"

@@ -36,6 +39,9 @@
- name: Stat sysctl file configuration
  stat:
    path: "{{ sysctl_file_path }}"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: sysctl_file_stat
  tags:
    - bootstrap-os

@@ -74,3 +80,10 @@
    - { name: kernel.panic, value: 10 }
    - { name: kernel.panic_on_oops, value: 1 }
  when: kubelet_protect_kernel_defaults|bool

- name: Check dummy module
  modprobe:
    name: dummy
    state: present
    params: 'numdummies=0'
  when: enable_nodelocaldns
@@ -39,7 +39,11 @@
    - bootstrap-os
    - resolvconf

- import_tasks: 0062-networkmanager.yml
- import_tasks: 0062-networkmanager-unmanaged-devices.yml
  tags:
    - bootstrap-os

- import_tasks: 0063-networkmanager-dns.yml
  when:
    - dns_mode != 'none'
    - resolvconf_mode == 'host_resolvconf'

@@ -92,6 +96,9 @@
- name: Check if we are running inside a Azure VM
  stat:
    path: /var/lib/waagent/
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: azure_check
  when:
    - not dns_late
@@ -2,6 +2,9 @@
- name: "Check_tokens | check if the tokens have already been generated on first master"
  stat:
    path: "{{ kube_token_dir }}/known_tokens.csv"
    get_attributes: no
    get_checksum: yes
    get_mime: no
  delegate_to: "{{ groups['kube-master'][0] }}"
  register: known_tokens_master
  run_once: true

@@ -20,6 +23,9 @@
- name: "Check tokens | check if a cert already exists"
  stat:
    path: "{{ kube_token_dir }}/known_tokens.csv"
    get_attributes: no
    get_checksum: yes
    get_mime: no
  register: known_tokens

- name: "Check_tokens | Set 'sync_tokens' to true"
|
|||
---
|
||||
# Use proxycommand if bastion host is in group all
|
||||
# This change obseletes editing ansible.cfg file depending on bastion existence
|
||||
ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ hostvars['bastion']['ansible_user'] }}@{{ hostvars['bastion']['ansible_host'] }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}"
|
||||
ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p -p {{ hostvars['bastion']['ansible_port'] | default(22) }} {{ hostvars['bastion']['ansible_user'] }}@{{ hostvars['bastion']['ansible_host'] }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}"
|
||||
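With the added -p option, a bastion listening on a non-default SSH port is honoured (defaulting to 22). A hedged sketch of how the option renders for one host (user, host and port values are purely illustrative):

  -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p -p 2222 admin@bastion.example.com '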

# selinux state
preinstall_selinux_state: permissive

@@ -15,7 +15,7 @@ is_fedora_coreos: false
disable_swap: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.19.7
kube_version: v1.19.9

## The minimum version working
kube_version_min_required: v1.18.0

@@ -150,9 +150,6 @@ kube_network_plugin_multus: false
# Determines if calico-rr group exists
peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"

# Set to false to disable calico-upgrade
calico_upgrade_enabled: true

# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
calico_datastore: "kdd"

@@ -445,6 +442,8 @@ apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
kube_apiserver_global_endpoint: |-
  {% if loadbalancer_apiserver is defined -%}
  https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
  {%- elif use_localhost_as_kubeapi_loadbalancer|default(False)|bool -%}
  https://127.0.0.1:{{ kube_apiserver_port }}
  {%- else -%}
  https://{{ first_kube_master }}:{{ kube_apiserver_port }}
  {%- endif %}
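The template picks the global apiserver endpoint in priority order; a compact sketch of how it resolves (ports shown symbolically):

  # loadbalancer_apiserver defined              -> https://lb-apiserver.kubernetes.local:<lb port or kube_apiserver_port>
  # use_localhost_as_kubeapi_loadbalancer: true -> https://127.0.0.1:<kube_apiserver_port>
  # otherwise                                   -> https://<first_kube_master>:<kube_apiserver_port>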

@@ -532,3 +531,23 @@ host_architecture: >-
# Sets the eventRecordQPS parameter in kubelet-config.yaml. The default value is 5 (see types.go)
# Setting it to 0 allows unlimited requests per second.
kubelet_event_record_qps: 5

proxy_env:
  http_proxy: "{{ http_proxy | default ('') }}"
  HTTP_PROXY: "{{ http_proxy | default ('') }}"
  https_proxy: "{{ https_proxy | default ('') }}"
  HTTPS_PROXY: "{{ https_proxy | default ('') }}"
  no_proxy: "{{ no_proxy | default ('') }}"
  NO_PROXY: "{{ no_proxy | default ('') }}"

proxy_disable_env:
  ALL_PROXY: ''
  FTP_PROXY: ''
  HTTPS_PROXY: ''
  HTTP_PROXY: ''
  NO_PROXY: ''
  all_proxy: ''
  ftp_proxy: ''
  http_proxy: ''
  https_proxy: ''
  no_proxy: ''
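A hedged sketch of how a mapping like proxy_disable_env is typically consumed: attach it to a task's environment so any inherited proxy variables are blanked for that task only (the task name and command below are illustrative, not part of this diff):

  - name: Run a command with proxies disabled
    command: /usr/local/bin/etcdctl endpoint health
    environment: "{{ proxy_disable_env }}"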
Some files were not shown because too many files have changed in this diff