diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 65ecd3c96..9af54e066 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -8,7 +8,7 @@ stages:
- deploy-special
variables:
- KUBESPRAY_VERSION: v2.19.0
+ KUBESPRAY_VERSION: v2.20.0
FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
ANSIBLE_FORCE_COLOR: "true"
diff --git a/.gitlab-ci/lint.yml b/.gitlab-ci/lint.yml
index 53f73e512..c9e1bdea4 100644
--- a/.gitlab-ci/lint.yml
+++ b/.gitlab-ci/lint.yml
@@ -75,6 +75,13 @@ check-readme-versions:
script:
- tests/scripts/check_readme_versions.sh
+check-typo:
+ stage: unit-tests
+ tags: [light]
+ image: python:3
+ script:
+ - tests/scripts/check_typo.sh
+
ci-matrix:
stage: unit-tests
tags: [light]
diff --git a/.gitlab-ci/packet.yml b/.gitlab-ci/packet.yml
index ad56ffa4b..47b4690cd 100644
--- a/.gitlab-ci/packet.yml
+++ b/.gitlab-ci/packet.yml
@@ -51,6 +51,11 @@ packet_ubuntu20-aio-docker:
extends: .packet_pr
when: on_success
+packet_ubuntu20-calico-aio-hardening:
+ stage: deploy-part2
+ extends: .packet_pr
+ when: on_success
+
packet_ubuntu18-calico-aio:
stage: deploy-part2
extends: .packet_pr
@@ -151,6 +156,11 @@ packet_rockylinux8-calico:
extends: .packet_pr
when: on_success
+packet_rockylinux9-calico:
+ stage: deploy-part2
+ extends: .packet_pr
+ when: on_success
+
packet_almalinux8-docker:
stage: deploy-part2
extends: .packet_pr
diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
index f0fbba312..32f708d1c 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -8,6 +8,7 @@ aliases:
- floryut
- oomichi
- cristicalin
+ - liupeng0518
kubespray-reviewers:
- holmsten
- bozzo
@@ -16,6 +17,7 @@ aliases:
- jayonlau
- cristicalin
- liupeng0518
+ - yankay
kubespray-emeritus_approvers:
- riverzhang
- atoms
diff --git a/README.md b/README.md
index 748edd88e..2a21db86c 100644
--- a/README.md
+++ b/README.md
@@ -57,10 +57,10 @@ A simple way to ensure you get all the correct version of Ansible is to use the
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
```ShellSession
-docker pull quay.io/kubespray/kubespray:v2.19.0
+docker pull quay.io/kubespray/kubespray:v2.20.0
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
- quay.io/kubespray/kubespray:v2.19.0 bash
+ quay.io/kubespray/kubespray:v2.20.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```
@@ -113,6 +113,7 @@ vagrant up
- [Air-Gap installation](docs/offline-environment.md)
- [NTP](docs/ntp.md)
- [Hardening](docs/hardening.md)
+- [Mirror](docs/mirror.md)
- [Roadmap](docs/roadmap.md)
## Supported Linux Distributions
@@ -120,13 +121,13 @@ vagrant up
- **Flatcar Container Linux by Kinvolk**
- **Debian** Bullseye, Buster, Jessie, Stretch
- **Ubuntu** 16.04, 18.04, 20.04, 22.04
-- **CentOS/RHEL** 7, [8](docs/centos.md#centos-8)
+- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
- **Fedora** 35, 36
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed
-- **Oracle Linux** 7, [8](docs/centos.md#centos-8)
-- **Alma Linux** [8](docs/centos.md#centos-8)
-- **Rocky Linux** [8](docs/centos.md#centos-8)
+- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
+- **Alma Linux** [8, 9](docs/centos.md#centos-8)
+- **Rocky Linux** [8, 9](docs/centos.md#centos-8)
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
@@ -135,8 +136,8 @@ Note: Upstart/SysV init based OS types are not supported.
## Supported Components
- Core
- - [kubernetes](https://github.com/kubernetes/kubernetes) v1.24.4
- - [etcd](https://github.com/etcd-io/etcd) v3.5.4
+ - [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.3
+ - [etcd](https://github.com/etcd-io/etcd) v3.5.5
- [docker](https://www.docker.com/) v20.10 (see note)
- [containerd](https://containerd.io/) v1.6.8
- [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
@@ -144,20 +145,20 @@ Note: Upstart/SysV init based OS types are not supported.
- [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
- [calico](https://github.com/projectcalico/calico) v3.23.3
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- - [cilium](https://github.com/cilium/cilium) v1.11.7
- - [flannel](https://github.com/flannel-io/flannel) v0.18.1
+ - [cilium](https://github.com/cilium/cilium) v1.12.1
+ - [flannel](https://github.com/flannel-io/flannel) v0.19.2
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.7
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
- [multus](https://github.com/intel/multus-cni) v3.8
- [weave](https://github.com/weaveworks/weave) v2.8.1
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.4.2
- Application
- - [cert-manager](https://github.com/jetstack/cert-manager) v1.9.0
+ - [cert-manager](https://github.com/jetstack/cert-manager) v1.9.1
- [coredns](https://github.com/coredns/coredns) v1.8.6
- - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.3.0
+ - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.4.0
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
- - [argocd](https://argoproj.github.io/) v2.4.7
- - [helm](https://helm.sh/) v3.9.2
+ - [argocd](https://argoproj.github.io/) v2.4.15
+ - [helm](https://helm.sh/) v3.9.4
- [metallb](https://metallb.universe.tf/) v0.12.1
- [registry](https://github.com/distribution/distribution) v2.8.1
- Storage Plugin
@@ -177,7 +178,7 @@ Note: Upstart/SysV init based OS types are not supported.
## Requirements
-- **Minimum required version of Kubernetes is v1.22**
+- **Minimum required version of Kubernetes is v1.23**
- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding**.
@@ -246,6 +247,7 @@ See also [Network checker](docs/netcheck.md).
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
+- [Kubean](https://github.com/kubean-io/kubean)
## CI Tests
diff --git a/SECURITY_CONTACTS b/SECURITY_CONTACTS
index efd128f4e..21703b328 100644
--- a/SECURITY_CONTACTS
+++ b/SECURITY_CONTACTS
@@ -9,5 +9,7 @@
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
-atoms
mattymo
+floryut
+oomichi
+cristicalin
diff --git a/cluster.yml b/cluster.yml
index cc169f80b..5f163de6a 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -35,7 +35,7 @@
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
- { role: download, tags: download, when: "not skip_downloads" }
-- hosts: etcd
+- hosts: etcd:kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
@@ -59,7 +59,10 @@
vars:
etcd_cluster_setup: false
etcd_events_cluster_setup: false
- when: etcd_deployment_type != "kubeadm"
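+      # only distribute etcd client certs when the network plugin actually
+      # needs direct etcd access (e.g. calico with the etcd datastore)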
+ when:
+ - etcd_deployment_type != "kubeadm"
+ - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+ - kube_network_plugin != "calico" or calico_datastore == "etcd"
- hosts: k8s_cluster
gather_facts: False
diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md
index 0e144c2be..1379e5247 100644
--- a/contrib/terraform/openstack/README.md
+++ b/contrib/terraform/openstack/README.md
@@ -270,6 +270,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
|`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
+|`bastion_allowed_ports` | List of ports to open on the bastion node, `[]` by default (see the example after this table) |
|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
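+
+For instance, to allow HTTPS to the bastion from anywhere, `bastion_allowed_ports` can use the same port-object format as `worker_allowed_ports` (the values below are illustrative):
+
+```hcl
+bastion_allowed_ports = [{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0" }]
+```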
@@ -294,7 +295,8 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
Allows a custom definition of worker nodes giving the operator full control over individual node flavor and
availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and
`number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration
-using the `k8s_nodes` variable.
+using the `k8s_nodes` variable. The `az`, `flavor` and `floating_ip` parameters are mandatory.
+The optional parameter `extra_groups` (a comma-delimited string) can be used to define extra inventory group memberships for specific nodes.
For example:
@@ -314,6 +316,7 @@ k8s_nodes = {
"az" = "sto3"
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
"floating_ip" = true
+ "extra_groups" = "calico_rr"
}
}
```
diff --git a/contrib/terraform/openstack/kubespray.tf b/contrib/terraform/openstack/kubespray.tf
index 92c4394da..e4f302f61 100644
--- a/contrib/terraform/openstack/kubespray.tf
+++ b/contrib/terraform/openstack/kubespray.tf
@@ -84,6 +84,7 @@ module "compute" {
supplementary_node_groups = var.supplementary_node_groups
master_allowed_ports = var.master_allowed_ports
worker_allowed_ports = var.worker_allowed_ports
+ bastion_allowed_ports = var.bastion_allowed_ports
use_access_ip = var.use_access_ip
master_server_group_policy = var.master_server_group_policy
node_server_group_policy = var.node_server_group_policy
@@ -96,6 +97,7 @@ module "compute" {
network_router_id = module.network.router_id
network_id = module.network.network_id
use_existing_network = var.use_existing_network
+ private_subnet_id = module.network.subnet_id
depends_on = [
module.network.subnet_id
diff --git a/contrib/terraform/openstack/modules/compute/main.tf b/contrib/terraform/openstack/modules/compute/main.tf
index 15470dc2e..bf28d2758 100644
--- a/contrib/terraform/openstack/modules/compute/main.tf
+++ b/contrib/terraform/openstack/modules/compute/main.tf
@@ -82,6 +82,17 @@ resource "openstack_networking_secgroup_rule_v2" "bastion" {
security_group_id = openstack_networking_secgroup_v2.bastion[0].id
}
+resource "openstack_networking_secgroup_rule_v2" "k8s_bastion_ports" {
+ count = length(var.bastion_allowed_ports)
+ direction = "ingress"
+ ethertype = "IPv4"
+ protocol = lookup(var.bastion_allowed_ports[count.index], "protocol", "tcp")
+ port_range_min = lookup(var.bastion_allowed_ports[count.index], "port_range_min")
+ port_range_max = lookup(var.bastion_allowed_ports[count.index], "port_range_max")
+ remote_ip_prefix = lookup(var.bastion_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")
+ security_group_id = openstack_networking_secgroup_v2.bastion[0].id
+}
+
resource "openstack_networking_secgroup_v2" "k8s" {
name = "${var.cluster_name}-k8s"
description = "${var.cluster_name} - Kubernetes"
@@ -195,6 +206,9 @@ resource "openstack_networking_port_v2" "bastion_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
+ fixed_ip {
+ subnet_id = var.private_subnet_id
+ }
depends_on = [
var.network_router_id
@@ -245,6 +259,9 @@ resource "openstack_networking_port_v2" "k8s_master_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
+ fixed_ip {
+ subnet_id = var.private_subnet_id
+ }
depends_on = [
var.network_router_id
@@ -305,6 +322,9 @@ resource "openstack_networking_port_v2" "k8s_masters_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
+ fixed_ip {
+ subnet_id = var.private_subnet_id
+ }
depends_on = [
var.network_router_id
@@ -363,6 +383,9 @@ resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
+ fixed_ip {
+ subnet_id = var.private_subnet_id
+ }
depends_on = [
var.network_router_id
@@ -423,6 +446,9 @@ resource "openstack_networking_port_v2" "etcd_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
+ fixed_ip {
+ subnet_id = var.private_subnet_id
+ }
depends_on = [
var.network_router_id
@@ -477,6 +503,9 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
+ fixed_ip {
+ subnet_id = var.private_subnet_id
+ }
depends_on = [
var.network_router_id
@@ -531,6 +560,9 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port"
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
+ fixed_ip {
+ subnet_id = var.private_subnet_id
+ }
depends_on = [
var.network_router_id
@@ -586,6 +618,9 @@ resource "openstack_networking_port_v2" "k8s_node_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
+ fixed_ip {
+ subnet_id = var.private_subnet_id
+ }
depends_on = [
var.network_router_id
@@ -646,6 +681,9 @@ resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
+ fixed_ip {
+ subnet_id = var.private_subnet_id
+ }
depends_on = [
var.network_router_id
@@ -701,6 +739,9 @@ resource "openstack_networking_port_v2" "k8s_nodes_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
+ fixed_ip {
+ subnet_id = var.private_subnet_id
+ }
depends_on = [
var.network_router_id
@@ -742,7 +783,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
metadata = {
ssh_user = var.ssh_user
- kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
+ kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups},${try(each.value.extra_groups, "")}"
depends_on = var.network_router_id
use_access_ip = var.use_access_ip
}
@@ -760,6 +801,9 @@ resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
+ fixed_ip {
+ subnet_id = var.private_subnet_id
+ }
depends_on = [
var.network_router_id
diff --git a/contrib/terraform/openstack/modules/compute/variables.tf b/contrib/terraform/openstack/modules/compute/variables.tf
index ca8034bb5..9259fd967 100644
--- a/contrib/terraform/openstack/modules/compute/variables.tf
+++ b/contrib/terraform/openstack/modules/compute/variables.tf
@@ -136,6 +136,10 @@ variable "worker_allowed_ports" {
type = list
}
+variable "bastion_allowed_ports" {
+ type = list
+}
+
variable "use_access_ip" {}
variable "master_server_group_policy" {
@@ -185,3 +189,7 @@ variable "port_security_enabled" {
variable "force_null_port_security" {
type = bool
}
+
+variable "private_subnet_id" {
+ type = string
+}
diff --git a/contrib/terraform/openstack/variables.tf b/contrib/terraform/openstack/variables.tf
index 12c7f03a5..821e442b8 100644
--- a/contrib/terraform/openstack/variables.tf
+++ b/contrib/terraform/openstack/variables.tf
@@ -257,6 +257,12 @@ variable "worker_allowed_ports" {
]
}
+variable "bastion_allowed_ports" {
+ type = list(any)
+
+ default = []
+}
+
variable "use_access_ip" {
default = 1
}
diff --git a/docs/ansible.md b/docs/ansible.md
index e1fb9a688..980b13650 100644
--- a/docs/ansible.md
+++ b/docs/ansible.md
@@ -281,7 +281,7 @@ For more information about Ansible and bastion hosts, read
## Mitogen
-Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for useage and reasons for deprecation.
+Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for usage and reasons for deprecation.
## Beyond ansible 2.9
diff --git a/docs/calico.md b/docs/calico.md
index a93b5cf5e..ad1115b1e 100644
--- a/docs/calico.md
+++ b/docs/calico.md
@@ -72,9 +72,14 @@ calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
In some cases you may want to route the pods subnet and so NAT is not needed on the nodes.
For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located.
-The following variables need to be set:
-`peer_with_router` to enable the peering with the datacenter's border router (default value: false).
-you'll need to edit the inventory and add a hostvar `local_as` by node.
+The following variables need to be set as follows:
+
+```yml
+peer_with_router: true # enable peering with the datacenter's border router (default value: false).
+nat_outgoing: false # (optional) disable NAT for outgoing traffic (default value: true).
+```
+
+You'll also need to edit the inventory and add a hostvar `local_as` per node.
```ShellSession
node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
@@ -171,7 +176,7 @@ node5
[rack0:vars]
cluster_id="1.0.0.1"
-calcio_rr_id=rr1
+calico_rr_id=rr1
calico_group_id=rr1
```
@@ -200,6 +205,14 @@ To re-define health host please set the following variable in your inventory:
calico_healthhost: "0.0.0.0"
```
+### Optional : Configure VXLAN hardware Offload
+
+Because of issue [projectcalico/calico#4727](https://github.com/projectcalico/calico/issues/4727), VXLAN offload is disabled by default. It can be configured like this:
+
+```yml
+calico_feature_detect_override: "ChecksumOffloadBroken=true" # VXLAN offload will be enabled when the kernel version is > 5.7 (it may cause problems with buggy NIC drivers)
+```
+
### Optional : Configure Calico Node probe timeouts
Under certain conditions a deployer may need to tune the Calico liveness and readiness probes timeout settings. These can be configured like this:
diff --git a/docs/centos.md b/docs/centos.md
index 12c27ea66..67a1f174b 100644
--- a/docs/centos.md
+++ b/docs/centos.md
@@ -2,12 +2,12 @@
## CentOS 7
-The maximum python version offically supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above.
+The maximum python version officially supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above.
Kubespray supports multiple ansible versions but only the default (5.x) gets wide testing coverage. If your deployment host is CentOS 7 it is recommended to use one of the earlier versions still supported.
## CentOS 8
-CentOS 8 / Oracle Linux 8 / AlmaLinux 8 / Rocky Linux 8 ship only with iptables-nft (ie without iptables-legacy similar to RHEL8)
+CentOS 8 / Oracle Linux 8, 9 / AlmaLinux 8, 9 / Rocky Linux 8, 9 ship only with iptables-nft (i.e. without iptables-legacy, similar to RHEL 8)
The only tested configuration for now is using Calico CNI
You need to add `calico_iptables_backend: "NFT"` to your configuration.
diff --git a/docs/ci.md b/docs/ci.md
index c2d3de7ff..7e4991ded 100644
--- a/docs/ci.md
+++ b/docs/ci.md
@@ -16,6 +16,7 @@ fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x
fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+rockylinux9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
@@ -35,6 +36,7 @@ fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@@ -54,6 +56,7 @@ fedora35 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
opensuse | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
diff --git a/docs/cilium.md b/docs/cilium.md
index e9c3e0d2b..e907d53cd 100644
--- a/docs/cilium.md
+++ b/docs/cilium.md
@@ -56,7 +56,7 @@ cilium_operator_extra_volume_mounts:
## Choose Cilium version
```yml
-cilium_version: v1.11.3
+cilium_version: v1.12.1
```
## Add variable to config
diff --git a/docs/containerd.md b/docs/containerd.md
index 32de17683..847f7c9ca 100644
--- a/docs/containerd.md
+++ b/docs/containerd.md
@@ -39,4 +39,65 @@ containerd_registries:
image_command_tool: crictl
```
+### Containerd Runtimes
+
+Containerd supports multiple runtime configurations that can be used with
+[RuntimeClass] Kubernetes feature. See [runtime classes in containerd] for the
+details of containerd configuration.
+
+In kubespray, the default runtime name is "runc", and it can be configured with the `containerd_runc_runtime` dictionary:
+
+```yaml
+containerd_runc_runtime:
+ name: runc
+ type: "io.containerd.runc.v2"
+ engine: ""
+ root: ""
+ options:
+ systemdCgroup: "false"
+ binaryName: /usr/local/bin/my-runc
+ base_runtime_spec: cri-base.json
+```
+
+Further runtimes can be configured with `containerd_additional_runtimes`, which
+is a list of such dictionaries.
+
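+For instance, to register an additional runtime (the gVisor handler below is purely illustrative):
+
+```yaml
+containerd_additional_runtimes:
+  - name: gvisor
+    type: "io.containerd.runsc.v1"
+    engine: ""
+    root: ""
+```
+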
+Default runtime can be changed by setting `containerd_default_runtime`.
+
+#### base_runtime_spec
+
+`base_runtime_spec` key in a runtime dictionary can be used to explicitly
+specify a runtime spec json file. We ship the default one which is generated
+with `ctr oci spec > /etc/containerd/cri-base.json`. It will be used if you set
+`base_runtime_spec: cri-base.json`. The main advantage of doing so is the presence of
+`rlimits` section in this configuration, which will restrict the maximum number
+of file descriptors(open files) per container to 1024.
+
+You can tune many more [settings][runtime-spec] by supplying your own file name and content with `containerd_base_runtime_specs`:
+
+```yaml
+containerd_base_runtime_specs:
+ cri-spec-custom.json: |
+ {
+ "ociVersion": "1.0.2-dev",
+ "process": {
+ "user": {
+ "uid": 0,
+ ...
+```
+
+The files in this dict will be placed in containerd config directory,
+`/etc/containerd` by default. The files can then be referenced by filename in a
+runtime:
+
+```yaml
+containerd_runc_runtime:
+ name: runc
+ base_runtime_spec: cri-spec-custom.json
+ ...
+```
+
[containerd]: https://containerd.io/
+[RuntimeClass]: https://kubernetes.io/docs/concepts/containers/runtime-class/
+[runtime classes in containerd]: https://github.com/containerd/containerd/blob/main/docs/cri/config.md#runtime-classes
+[runtime-spec]: https://github.com/opencontainers/runtime-spec
diff --git a/docs/dns-stack.md b/docs/dns-stack.md
index 04662594e..9d172b832 100644
--- a/docs/dns-stack.md
+++ b/docs/dns-stack.md
@@ -19,6 +19,14 @@ ndots value to be used in ``/etc/resolv.conf``
It is important to note that multiple search domains combined with high ``ndots``
values lead to poor performance of DNS stack, so please choose it wisely.
+### dns_timeout
+
+timeout value to be used in ``/etc/resolv.conf``
+
+### dns_attempts
+
+attempts value to be used in ``/etc/resolv.conf``
+
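+For example, to make the host resolver fail over between nameservers more quickly (the values below are illustrative):
+
+```yaml
+dns_timeout: 2
+dns_attempts: 2
+```
+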
### searchdomains
Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
@@ -26,6 +34,8 @@ Custom search domains to be added in addition to the cluster search domains (``d
Most Linux systems limit the total number of search domains to 6 and the total length of all search domains
to 256 characters. Depending on the length of ``dns_domain``, you're limited to less than the total limit.
+`remove_default_searchdomains: true` will remove the default cluster search domains.
+
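+For example, to use only custom search domains (the domain below is illustrative):
+
+```yaml
+searchdomains:
+  - corp.example.com
+remove_default_searchdomains: true
+```
+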
Please note that ``resolvconf_mode: docker_dns`` will automatically add your systems search domains as
additional search domains. Please take this into the accounts for the limits.
@@ -62,6 +72,13 @@ coredns_external_zones:
nameservers:
- 192.168.0.53
cache: 0
+- zones:
+ - mydomain.tld
+ nameservers:
+ - 10.233.0.3
+ cache: 5
+ rewrite:
+ - name stop website.tld website.namespace.svc.cluster.local
```
or as INI
@@ -263,7 +280,8 @@ nodelocaldns_secondary_skew_seconds: 5
* the ``searchdomains`` have a limitation of a 6 names and 256 chars
length. Due to default ``svc, default.svc`` subdomains, the actual
- limits are a 4 names and 239 chars respectively.
+  limits are 4 names and 239 chars respectively. Setting `remove_default_searchdomains: true`
+  brings you back to the full limit of 6 names.
* the ``nameservers`` have a limitation of a 3 servers, although there
is a way to mitigate that with the ``upstream_dns_servers``,
diff --git a/docs/hardening.md b/docs/hardening.md
index df757df32..9a7f3d841 100644
--- a/docs/hardening.md
+++ b/docs/hardening.md
@@ -17,9 +17,9 @@ The **kubernetes** version should be at least `v1.23.6` to have all the most rec
---
## kube-apiserver
-authorization_modes: ['Node','RBAC']
+authorization_modes: ['Node', 'RBAC']
# AppArmor-based OS
-#kube_apiserver_feature_gates: ['AppArmor=true']
+# kube_apiserver_feature_gates: ['AppArmor=true']
kube_apiserver_request_timeout: 120s
kube_apiserver_service_account_lookup: true
@@ -60,7 +60,7 @@ kube_profiling: false
kube_controller_manager_bind_address: 127.0.0.1
kube_controller_terminated_pod_gc_threshold: 50
# AppArmor-based OS
-#kube_controller_feature_gates: ["RotateKubeletServerCertificate=true","AppArmor=true"]
+# kube_controller_feature_gates: ["RotateKubeletServerCertificate=true", "AppArmor=true"]
kube_controller_feature_gates: ["RotateKubeletServerCertificate=true"]
## kube-scheduler
@@ -68,13 +68,12 @@ kube_scheduler_bind_address: 127.0.0.1
kube_kubeadm_scheduler_extra_args:
profiling: false
# AppArmor-based OS
-#kube_scheduler_feature_gates: ["AppArmor=true"]
+# kube_scheduler_feature_gates: ["AppArmor=true"]
## etcd
etcd_deployment_type: kubeadm
## kubelet
-kubelet_authorization_mode_webhook: true
kubelet_authentication_token_webhook: true
kube_read_only_port: 0
kubelet_rotate_server_certificates: true
@@ -83,8 +82,15 @@ kubelet_event_record_qps: 1
kubelet_rotate_certificates: true
kubelet_streaming_connection_idle_timeout: "5m"
kubelet_make_iptables_util_chains: true
-kubelet_feature_gates: ["RotateKubeletServerCertificate=true","SeccompDefault=true"]
+kubelet_feature_gates: ["RotateKubeletServerCertificate=true", "SeccompDefault=true"]
kubelet_seccomp_default: true
+kubelet_systemd_hardening: true
+# In case your control plane nodes have multiple interfaces
+# and you want to specify the right IP addresses,
+# kubelet_secure_addresses allows you to specify the IPs
+# from which the kubelet will accept packets.
+kubelet_secure_addresses: "192.168.10.110 192.168.10.111 192.168.10.112"
# additional configurations
kube_owner: root
@@ -103,6 +109,8 @@ Let's take a deep look to the resultant **kubernetes** configuration:
* The `encryption-provider-config` provide encryption at rest. This means that the `kube-apiserver` encrypt data that is going to be stored before they reach `etcd`. So the data is completely unreadable from `etcd` (in case an attacker is able to exploit this).
* The `rotateCertificates` in `KubeletConfiguration` is set to `true` along with `serverTLSBootstrap`. This could be used in alternative to `tlsCertFile` and `tlsPrivateKeyFile` parameters. Additionally it automatically generates certificates by itself, but you need to manually approve them or at least using an operator to do this (for more details, please take a look here: ).
* If you are installing **kubernetes** in an AppArmor-based OS (eg. Debian/Ubuntu) you can enable the `AppArmor` feature gate uncommenting the lines with the comment `# AppArmor-based OS` on top.
+* The `kubelet_systemd_hardening` option, together with `kubelet_secure_addresses`, sets up a minimal firewall on the system. To better understand how these variables work, here's an explanatory image:
+ ![kubelet hardening](img/kubelet-hardening.png)
Once you have the file properly filled, you can run the **Ansible** command to start the installation:
diff --git a/docs/img/kubelet-hardening.png b/docs/img/kubelet-hardening.png
new file mode 100644
index 000000000..5546a8ba9
Binary files /dev/null and b/docs/img/kubelet-hardening.png differ
diff --git a/docs/metallb.md b/docs/metallb.md
index 7121f1ea0..faeb351ac 100644
--- a/docs/metallb.md
+++ b/docs/metallb.md
@@ -2,7 +2,7 @@
MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation.
It allows you to create Kubernetes services of type "LoadBalancer" in clusters that don't run on a cloud provider, and thus cannot simply hook into 3rd party products to provide load-balancers.
-The default operationg mode of MetalLB is in ["Layer2"](https://metallb.universe.tf/concepts/layer2/) but it can also operate in ["BGP"](https://metallb.universe.tf/concepts/bgp/) mode.
+The default operating mode of MetalLB is in ["Layer2"](https://metallb.universe.tf/concepts/layer2/) but it can also operate in ["BGP"](https://metallb.universe.tf/concepts/bgp/) mode.
## Prerequisites
@@ -70,7 +70,7 @@ metallb_peers:
When using calico >= 3.18 you can replace MetalLB speaker by calico Service LoadBalancer IP advertisement.
See [calico service IPs advertisement documentation](https://docs.projectcalico.org/archive/v3.18/networking/advertise-service-ips#advertise-service-load-balancer-ip-addresses).
-In this scenarion you should disable the MetalLB speaker and configure the `calico_advertise_service_loadbalancer_ips` to match your `metallb_ip_range`
+In this scenario you should disable the MetalLB speaker and configure the `calico_advertise_service_loadbalancer_ips` to match your `metallb_ip_range`
```yaml
metallb_speaker_enabled: false
diff --git a/docs/mirror.md b/docs/mirror.md
new file mode 100644
index 000000000..3138d2034
--- /dev/null
+++ b/docs/mirror.md
@@ -0,0 +1,66 @@
+# Public Download Mirror
+
+A public mirror is useful for downloading public resources quickly in some areas of the world (such as China).
+
+## Configuring Kubespray to use a mirror site
+
+You can follow the [offline environment](offline-environment.md) documentation to point the image/file download configuration at a public mirror site. If you want to download quickly in China, the configuration can look like this:
+
+```yaml
+gcr_image_repo: "gcr.m.daocloud.io"
+kube_image_repo: "k8s.m.daocloud.io"
+docker_image_repo: "docker.m.daocloud.io"
+quay_image_repo: "quay.m.daocloud.io"
+github_image_repo: "ghcr.m.daocloud.io"
+
+files_repo: "https://files.m.daocloud.io"
+```
+
+Use mirror sites only if you trust the provider; the Kubespray team cannot verify their reliability or security.
+You can replace `m.daocloud.io` with any mirror site you want.
+
+## Example Usage: Full Steps
+
+You can follow the full steps below to use Kubespray with a mirror.
+
+Install Ansible according to the Ansible installation guide, then run the following steps:
+
+```shell
+# Copy ``inventory/sample`` as ``inventory/mycluster``
+cp -rfp inventory/sample inventory/mycluster
+
+# Update Ansible inventory file with inventory builder
+declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
+CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
+
+# Use the download mirror
+cp inventory/mycluster/group_vars/all/offline.yml inventory/mycluster/group_vars/all/mirror.yml
+sed -i -E '/# .*\{\{ files_repo/s/^# //g' inventory/mycluster/group_vars/all/mirror.yml
+tee -a inventory/mycluster/group_vars/all/mirror.yml <<EOF
+gcr_image_repo: "gcr.m.daocloud.io"
+kube_image_repo: "k8s.m.daocloud.io"
+docker_image_repo: "docker.m.daocloud.io"
+quay_image_repo: "quay.m.daocloud.io"
+github_image_repo: "ghcr.m.daocloud.io"
+files_repo: "https://files.m.daocloud.io"
+EOF
+```
diff --git a/roles/container-engine/cri-o/defaults/main.yml b/roles/container-engine/cri-o/defaults/main.yml
--- a/roles/container-engine/cri-o/defaults/main.yml
+++ b/roles/container-engine/cri-o/defaults/main.yml
-crio_required_version: "{{ kube_version | regex_replace('^v(?P<major>\\d+).(?P<minor>\\d+).(?P<patch>\\d+)$', '\\g<major>.\\g<minor>') }}"
-crio_kubernetes_version_matrix:
- "1.24": "1.24"
- "1.23": "1.23"
- "1.22": "1.22"
-
-crio_version: "{{ crio_kubernetes_version_matrix[crio_required_version] | default('1.24') }}"
-
# The crio_runtimes variable defines a list of OCI compatible runtimes.
crio_runtimes:
- name: runc
- path: /usr/bin/runc
+ path: "{{ bin_dir }}/runc"
type: oci
root: /run/runc
@@ -65,7 +58,7 @@ kata_runtimes:
# crun is a fast and low-memory footprint OCI Container Runtime fully written in C.
crun_runtime:
name: crun
- path: /usr/bin/crun
+ path: "{{ bin_dir }}/crun"
type: oci
root: /run/crun
@@ -76,20 +69,10 @@ youki_runtime:
type: oci
root: /run/youki
-# When this is true, CRI-O package repositories are added. Set this to false when using an
-# environment with preconfigured CRI-O package repositories.
-crio_add_repos: true
-
-# Allow crio offline installation
+# TODO(cristicalin): remove this after 2.21
crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
-
-# Allow crio offline installation
crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"
-# skopeo need for save/load images when download_run_once=true
-skopeo_packages:
- - "skopeo"
-
# Configure the cri-o pids limit, increase this for heavily multi-threaded workloads
# see https://github.com/cri-o/cri-o/issues/1921
crio_pids_limit: 1024
@@ -102,3 +85,19 @@ crio_subuid_start: 2130706432
crio_subuid_length: 16777216
crio_subgid_start: 2130706432
crio_subgid_length: 16777216
+
+# cri-o binary files
+crio_bin_files:
+ - conmon
+ - crio
+ - crio-status
+ - pinns
+
+# cri-o manual files
+crio_man_files:
+ 5:
+ - crio.conf
+ - crio.conf.d
+ 8:
+ - crio
+ - crio-status
diff --git a/roles/container-engine/cri-o/meta/main.yml b/roles/container-engine/cri-o/meta/main.yml
index ec9d9a55e..3304f70cf 100644
--- a/roles/container-engine/cri-o/meta/main.yml
+++ b/roles/container-engine/cri-o/meta/main.yml
@@ -1,3 +1,5 @@
---
dependencies:
- role: container-engine/crictl
+ - role: container-engine/runc
+ - role: container-engine/skopeo
diff --git a/roles/container-engine/cri-o/molecule/default/files/10-mynet.conf b/roles/container-engine/cri-o/molecule/default/files/10-mynet.conf
new file mode 100644
index 000000000..f10935b75
--- /dev/null
+++ b/roles/container-engine/cri-o/molecule/default/files/10-mynet.conf
@@ -0,0 +1,17 @@
+{
+ "cniVersion": "0.2.0",
+ "name": "mynet",
+ "type": "bridge",
+ "bridge": "cni0",
+ "isGateway": true,
+ "ipMasq": true,
+ "ipam": {
+ "type": "host-local",
+ "subnet": "172.19.0.0/24",
+ "routes": [
+ {
+ "dst": "0.0.0.0/0"
+ }
+ ]
+ }
+}
diff --git a/roles/container-engine/cri-o/molecule/default/files/container.json b/roles/container-engine/cri-o/molecule/default/files/container.json
new file mode 100644
index 000000000..bcd71e7e5
--- /dev/null
+++ b/roles/container-engine/cri-o/molecule/default/files/container.json
@@ -0,0 +1,10 @@
+{
+ "metadata": {
+ "name": "runc1"
+ },
+ "image": {
+ "image": "quay.io/kubespray/hello-world:latest"
+ },
+ "log_path": "runc1.0.log",
+ "linux": {}
+}
diff --git a/roles/container-engine/cri-o/molecule/default/files/sandbox.json b/roles/container-engine/cri-o/molecule/default/files/sandbox.json
new file mode 100644
index 000000000..eb9dcb9d2
--- /dev/null
+++ b/roles/container-engine/cri-o/molecule/default/files/sandbox.json
@@ -0,0 +1,10 @@
+{
+ "metadata": {
+ "name": "runc1",
+ "namespace": "default",
+ "attempt": 1,
+ "uid": "hdishd83djaidwnduwk28bcsb"
+ },
+ "linux": {},
+ "log_directory": "/tmp"
+}
diff --git a/roles/container-engine/cri-o/molecule/default/molecule.yml b/roles/container-engine/cri-o/molecule/default/molecule.yml
index 1c67a648c..163eb8e60 100644
--- a/roles/container-engine/cri-o/molecule/default/molecule.yml
+++ b/roles/container-engine/cri-o/molecule/default/molecule.yml
@@ -7,24 +7,38 @@ lint: |
set -e
yamllint -c ../../../.yamllint .
platforms:
- - name: ubuntu2004
+ - name: ubuntu20
box: generic/ubuntu2004
cpus: 2
memory: 1024
groups:
- kube_control_plane
+ - kube_node
+ - k8s_cluster
- name: almalinux8
box: almalinux/8
cpus: 2
memory: 1024
groups:
- kube_control_plane
+ - kube_node
+ - k8s_cluster
- name: fedora
- box: fedora/35-cloud-base
+ box: fedora/36-cloud-base
cpus: 2
memory: 1024
groups:
- kube_control_plane
+ - kube_node
+ - k8s_cluster
+ - name: debian10
+ box: generic/debian10
+ cpus: 2
+ memory: 1024
+ groups:
+ - kube_control_plane
+ - kube_node
+ - k8s_cluster
provisioner:
name: ansible
env:
diff --git a/roles/container-engine/cri-o/molecule/default/prepare.yml b/roles/container-engine/cri-o/molecule/default/prepare.yml
index 1afc51a04..ec47a1e5b 100644
--- a/roles/container-engine/cri-o/molecule/default/prepare.yml
+++ b/roles/container-engine/cri-o/molecule/default/prepare.yml
@@ -2,5 +2,51 @@
- name: Prepare
hosts: all
gather_facts: False
+ become: true
+ vars:
+ ignore_assert_errors: true
roles:
+ - role: kubespray-defaults
- role: bootstrap-os
+ - role: kubernetes/preinstall
+ - role: adduser
+ user: "{{ addusers.kube }}"
+ tasks:
+ - include_tasks: "../../../../download/tasks/download_file.yml"
+ vars:
+ download: "{{ download_defaults | combine(downloads.cni) }}"
+
+- name: Prepare CNI
+ hosts: all
+ gather_facts: False
+ become: true
+ vars:
+ ignore_assert_errors: true
+ kube_network_plugin: cni
+ roles:
+ - role: kubespray-defaults
+ - role: network_plugin/cni
+ tasks:
+ - name: Copy test container files
+ copy:
+ src: "{{ item }}"
+ dest: "/tmp/{{ item }}"
+ owner: root
+ mode: 0644
+ with_items:
+ - container.json
+ - sandbox.json
+ - name: Create /etc/cni/net.d directory
+ file:
+ path: /etc/cni/net.d
+ state: directory
+ owner: "{{ kube_owner }}"
+ mode: 0755
+ - name: Setup CNI
+ copy:
+ src: "{{ item }}"
+ dest: "/etc/cni/net.d/{{ item }}"
+ owner: root
+ mode: 0644
+ with_items:
+ - 10-mynet.conf
diff --git a/roles/container-engine/cri-o/molecule/default/tests/test_default.py b/roles/container-engine/cri-o/molecule/default/tests/test_default.py
index b7f3bd6db..358a1b75a 100644
--- a/roles/container-engine/cri-o/molecule/default/tests/test_default.py
+++ b/roles/container-engine/cri-o/molecule/default/tests/test_default.py
@@ -19,3 +19,17 @@ def test_run(host):
cmd = host.command(crictl + " --runtime-endpoint " + path + " version")
assert cmd.rc == 0
assert "RuntimeName: cri-o" in cmd.stdout
+
+def test_run_pod(host):
+ runtime = "runc"
+
+ run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime)
+ with host.sudo():
+ cmd = host.command(run_command)
+ assert cmd.rc == 0
+
+ with host.sudo():
+ log_f = host.file("/tmp/runc1.0.log")
+
+ assert log_f.exists
+ assert b"Hello from Docker" in log_f.content
diff --git a/roles/container-engine/cri-o/tasks/cleanup.yaml b/roles/container-engine/cri-o/tasks/cleanup.yaml
new file mode 100644
index 000000000..28c0c3af2
--- /dev/null
+++ b/roles/container-engine/cri-o/tasks/cleanup.yaml
@@ -0,0 +1,119 @@
+---
+# TODO(cristicalin): drop this file after 2.21
+- name: CRI-O kubic repo name for debian os family
+ set_fact:
+ crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x','')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}"
+ when: ansible_os_family == "Debian"
+
+- name: Remove legacy CRI-O kubic apt repo key
+ apt_key:
+ url: "https://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/Release.key"
+ state: absent
+ when: crio_kubic_debian_repo_name is defined
+
+- name: Remove legacy CRI-O kubic apt repo
+ apt_repository:
+ repo: "deb http://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/ /"
+ state: absent
+ filename: devel-kubic-libcontainers-stable
+ when: crio_kubic_debian_repo_name is defined
+
+- name: Remove legacy CRI-O kubic cri-o apt repo
+ apt_repository:
+ repo: "deb {{ crio_download_crio }}{{ crio_version }}/{{ crio_kubic_debian_repo_name }}/ /"
+ state: absent
+ filename: devel-kubic-libcontainers-stable-cri-o
+ when: crio_kubic_debian_repo_name is defined
+
+- name: Remove legacy CRI-O kubic yum repo
+ yum_repository:
+ name: devel_kubic_libcontainers_stable
+ description: Stable Releases of Upstream github.com/containers packages (CentOS_$releasever)
+ baseurl: http://{{ crio_download_base }}/CentOS_{{ ansible_distribution_major_version }}/
+ state: absent
+ when:
+ - ansible_os_family == "RedHat"
+ - ansible_distribution not in ["Amazon", "Fedora"]
+
+- name: Remove legacy CRI-O kubic yum repo
+ yum_repository:
+ name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
+ description: "CRI-O {{ crio_version }} (CentOS_$releasever)"
+ baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_{{ ansible_distribution_major_version }}/"
+ state: absent
+ when:
+ - ansible_os_family == "RedHat"
+ - ansible_distribution not in ["Amazon", "Fedora"]
+
+- name: Remove legacy CRI-O kubic yum repo
+ yum_repository:
+ name: devel_kubic_libcontainers_stable
+ description: Stable Releases of Upstream github.com/containers packages
+ baseurl: http://{{ crio_download_base }}/Fedora_{{ ansible_distribution_major_version }}/
+ state: absent
+ when:
+ - ansible_distribution in ["Fedora"]
+ - not is_ostree
+
+- name: Remove legacy CRI-O kubic yum repo
+ yum_repository:
+ name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
+ description: "CRI-O {{ crio_version }}"
+ baseurl: "{{ crio_download_crio }}{{ crio_version }}/Fedora_{{ ansible_distribution_major_version }}/"
+ state: absent
+ when:
+ - ansible_distribution in ["Fedora"]
+ - not is_ostree
+
+- name: Remove legacy CRI-O kubic yum repo
+ yum_repository:
+ name: devel_kubic_libcontainers_stable
+ description: Stable Releases of Upstream github.com/containers packages
+ baseurl: http://{{ crio_download_base }}/CentOS_7/
+ state: absent
+ when: ansible_distribution in ["Amazon"]
+
+- name: Remove legacy CRI-O kubic yum repo
+ yum_repository:
+ name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
+ description: "CRI-O {{ crio_version }}"
+ baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_7/"
+ state: absent
+ when: ansible_distribution in ["Amazon"]
+
+- name: Disable modular repos for CRI-O
+ ini_file:
+ path: "/etc/yum.repos.d/{{ item.repo }}.repo"
+ section: "{{ item.section }}"
+ option: enabled
+ value: 0
+ mode: 0644
+ become: true
+ when: is_ostree
+ loop:
+ - repo: "fedora-updates-modular"
+ section: "updates-modular"
+ - repo: "fedora-modular"
+ section: "fedora-modular"
+
+# Disable any older module version if we enabled them before
+- name: Disable CRI-O ex module
+ command: "rpm-ostree ex module disable cri-o:{{ item }}"
+ become: true
+ when:
+ - is_ostree
+ - ostree_version is defined and ostree_version.stdout is version('2021.9', '>=')
+ with_items:
+ - 1.22
+ - 1.23
+ - 1.24
+
+- name: cri-o | remove installed packages
+ package:
+ name: "{{ item }}"
+ state: absent
+ when: not is_ostree
+ with_items:
+ - cri-o
+ - cri-o-runc
+ - oci-systemd-hook
diff --git a/roles/container-engine/cri-o/tasks/crio_repo.yml b/roles/container-engine/cri-o/tasks/crio_repo.yml
deleted file mode 100644
index dc67bf13a..000000000
--- a/roles/container-engine/cri-o/tasks/crio_repo.yml
+++ /dev/null
@@ -1,179 +0,0 @@
----
-- block:
- - name: Add Debian Backports apt repo
- apt_repository:
- repo: "deb http://deb.debian.org/debian {{ ansible_distribution_release }}-backports main"
- state: present
- filename: debian-backports
-
- - name: Set libseccomp2 pin priority to apt_preferences on Debian buster
- copy:
- content: |
- Package: libseccomp2
- Pin: release a={{ ansible_distribution_release }}-backports
- Pin-Priority: 1001
- dest: "/etc/apt/preferences.d/libseccomp2"
- owner: "root"
- mode: 0644
- when:
- - ansible_distribution == "Debian"
- - ansible_distribution_version == "10"
-
-- name: CRI-O kubic repo name for debian os family
- set_fact:
- crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x','')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}"
- when: ansible_os_family == "Debian"
-
-- name: Add CRI-O kubic apt repo key
- apt_key:
- url: "https://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/Release.key"
- state: present
- when: crio_kubic_debian_repo_name is defined
- register: apt_key_download
- until: apt_key_download is succeeded
- retries: 4
- delay: "{{ retry_stagger | d(3) }}"
- environment: "{{ proxy_env }}"
-
-- name: Add CRI-O kubic apt repo
- apt_repository:
- repo: "deb http://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/ /"
- state: present
- filename: devel-kubic-libcontainers-stable
- when: crio_kubic_debian_repo_name is defined
-
-- name: Add CRI-O kubic cri-o apt repo
- apt_repository:
- repo: "deb {{ crio_download_crio }}{{ crio_version }}/{{ crio_kubic_debian_repo_name }}/ /"
- state: present
- filename: devel-kubic-libcontainers-stable-cri-o
- when: crio_kubic_debian_repo_name is defined
-
-- name: Check that amzn2-extras.repo exists
- stat:
- path: /etc/yum.repos.d/amzn2-extras.repo
- register: amzn2_extras_file_stat
- when: ansible_distribution in ["Amazon"]
-
-- name: Find docker repo in amzn2-extras.repo file
- lineinfile:
- dest: /etc/yum.repos.d/amzn2-extras.repo
- line: "[amzn2extra-docker]"
- check_mode: yes
- register: amzn2_extras_docker_repo
- when:
- - ansible_distribution in ["Amazon"]
- - amzn2_extras_file_stat.stat.exists
-
-- name: Remove docker repository
- ini_file:
- dest: /etc/yum.repos.d/amzn2-extras.repo
- section: amzn2extra-docker
- option: enabled
- value: "0"
- backup: yes
- mode: 0644
- when:
- - ansible_distribution in ["Amazon"]
- - amzn2_extras_file_stat.stat.exists
- - not amzn2_extras_docker_repo.changed
-
-- name: Add container-selinux yum repo
- yum_repository:
- name: copr:copr.fedorainfracloud.org:lsm5:container-selinux
- file: _copr_lsm5-container-selinux.repo
- description: Copr repo for container-selinux owned by lsm5
- baseurl: https://download.copr.fedorainfracloud.org/results/lsm5/container-selinux/epel-7-$basearch/
- gpgcheck: yes
- gpgkey: https://download.copr.fedorainfracloud.org/results/lsm5/container-selinux/pubkey.gpg
- skip_if_unavailable: yes
- enabled: yes
- repo_gpgcheck: no
- when: ansible_distribution in ["Amazon"]
-
-- name: Add CRI-O kubic yum repo
- yum_repository:
- name: devel_kubic_libcontainers_stable
- description: Stable Releases of Upstream github.com/containers packages (CentOS_$releasever)
- baseurl: http://{{ crio_download_base }}/CentOS_{{ ansible_distribution_major_version }}/
- gpgcheck: yes
- gpgkey: http://{{ crio_download_base }}/CentOS_{{ ansible_distribution_major_version }}/repodata/repomd.xml.key
- keepcache: "0"
- when:
- - ansible_os_family == "RedHat"
- - ansible_distribution not in ["Amazon", "Fedora"]
-
-- name: Add CRI-O kubic yum repo
- yum_repository:
- name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
- description: "CRI-O {{ crio_version }} (CentOS_$releasever)"
- baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_{{ ansible_distribution_major_version }}/"
- gpgcheck: yes
- gpgkey: "{{ crio_download_crio }}{{ crio_version }}/CentOS_{{ ansible_distribution_major_version }}/repodata/repomd.xml.key"
- when:
- - ansible_os_family == "RedHat"
- - ansible_distribution not in ["Amazon", "Fedora"]
-
-- name: Add CRI-O kubic yum repo
- yum_repository:
- name: devel_kubic_libcontainers_stable
- description: Stable Releases of Upstream github.com/containers packages
- baseurl: http://{{ crio_download_base }}/Fedora_{{ ansible_distribution_major_version }}/
- gpgcheck: yes
- gpgkey: http://{{ crio_download_base }}/Fedora_{{ ansible_distribution_major_version }}/repodata/repomd.xml.key
- keepcache: "0"
- when:
- - ansible_distribution in ["Fedora"]
- - not is_ostree
-
-- name: Add CRI-O kubic yum repo
- yum_repository:
- name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
- description: "CRI-O {{ crio_version }}"
- baseurl: "{{ crio_download_crio }}{{ crio_version }}/Fedora_{{ ansible_distribution_major_version }}/"
- gpgcheck: yes
- gpgkey: "{{ crio_download_crio }}{{ crio_version }}/Fedora_{{ ansible_distribution_major_version }}/repodata/repomd.xml.key"
- when:
- - ansible_distribution in ["Fedora"]
- - not is_ostree
-
-- name: Add CRI-O kubic yum repo
- yum_repository:
- name: devel_kubic_libcontainers_stable
- description: Stable Releases of Upstream github.com/containers packages
- baseurl: http://{{ crio_download_base }}/CentOS_7/
- gpgcheck: yes
- gpgkey: http://{{ crio_download_base }}/CentOS_7/repodata/repomd.xml.key
- keepcache: "0"
- when: ansible_distribution in ["Amazon"]
-
-- name: Add CRI-O kubic yum repo
- yum_repository:
- name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
- description: "CRI-O {{ crio_version }}"
- baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_7/"
- gpgcheck: yes
- gpgkey: "{{ crio_download_crio }}{{ crio_version }}/CentOS_7/repodata/repomd.xml.key"
- when: ansible_distribution in ["Amazon"]
-
-- name: Enable modular repos for CRI-O
- ini_file:
- path: "/etc/yum.repos.d/{{ item.repo }}.repo"
- section: "{{ item.section }}"
- option: enabled
- value: 1
- mode: 0644
- become: true
- when: is_ostree
- loop:
- - repo: "fedora-updates-modular"
- section: "updates-modular"
- - repo: "fedora-modular"
- section: "fedora-modular"
-
-- name: Enable CRI-O ex module
- command: "rpm-ostree ex module enable cri-o:{{ crio_version }}"
- become: true
- when:
- - is_ostree
- - ostree_version is defined and ostree_version.stdout is version('2021.9', '>=')
diff --git a/roles/container-engine/cri-o/tasks/main.yaml b/roles/container-engine/cri-o/tasks/main.yaml
index 2b84b0978..89aab567e 100644
--- a/roles/container-engine/cri-o/tasks/main.yaml
+++ b/roles/container-engine/cri-o/tasks/main.yaml
@@ -1,5 +1,5 @@
---
-- name: check if fedora coreos
+- name: cri-o | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
@@ -7,57 +7,48 @@
get_mime: no
register: ostree
-- name: set is_ostree
+- name: cri-o | set is_ostree
set_fact:
is_ostree: "{{ ostree.stat.exists }}"
-- name: get ostree version
+- name: cri-o | get ostree version
shell: "set -o pipefail && rpm-ostree --version | awk -F\\' '/Version/{print $2}'"
args:
executable: /bin/bash
register: ostree_version
when: is_ostree
-- name: gather os specific variables
- include_vars: "{{ item }}"
- with_first_found:
- - files:
- - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- - "{{ ansible_distribution|lower }}.yml"
- - "{{ ansible_os_family|lower }}-{{ ansible_architecture }}.yml"
- - "{{ ansible_os_family|lower }}.yml"
- - defaults.yml
- paths:
- - ../vars
- skip: true
- tags:
- - facts
+- name: cri-o | Download cri-o
+ include_tasks: "../../../download/tasks/download_file.yml"
+ vars:
+ download: "{{ download_defaults | combine(downloads.crio) }}"
-- name: import crio repo
- import_tasks: "crio_repo.yml"
- when: crio_add_repos
+- name: cri-o | special handling for amazon linux
+ import_tasks: "setup-amazon.yaml"
+ when: ansible_distribution in ["Amazon"]
-- name: Build a list of crio runtimes with Katacontainers runtimes
+- name: cri-o | clean up legacy repos
+ import_tasks: "cleanup.yaml"
+
+- name: cri-o | build a list of crio runtimes with Katacontainers runtimes
set_fact:
crio_runtimes: "{{ crio_runtimes + kata_runtimes }}"
when:
- kata_containers_enabled
-- name: Build a list of crio runtimes with crun runtime
+- name: cri-o | build a list of crio runtimes with crun runtime
set_fact:
crio_runtimes: "{{ crio_runtimes + [crun_runtime] }}"
when:
- crun_enabled
-- name: Build a list of crio runtimes with youki runtime
+- name: cri-o | build a list of crio runtimes with youki runtime
set_fact:
crio_runtimes: "{{ crio_runtimes + [youki_runtime] }}"
when:
- youki_enabled
-- name: Make sure needed folders exist in the system
+- name: cri-o | make sure needed folders exist in the system
with_items:
- /etc/crio
- /etc/containers
@@ -67,98 +58,47 @@
state: directory
mode: 0755
-- name: Install cri-o config
+- name: cri-o | install cri-o config
template:
src: crio.conf.j2
dest: /etc/crio/crio.conf
mode: 0644
register: config_install
-- name: Install config.json
+- name: cri-o | install config.json
template:
src: config.json.j2
dest: /etc/crio/config.json
mode: 0644
register: reg_auth_install
-- name: Add skopeo pkg to install
- set_fact:
- crio_packages: "{{ crio_packages + skopeo_packages }}"
- when:
- - not skip_downloads|default(false)
- - download_run_once
-
-- name: Add libseccomp2 package from Debian Backports to install
- set_fact:
- crio_packages: "{{ crio_debian_buster_backports_packages + crio_packages }}"
- when:
- - ansible_distribution == "Debian"
- - ansible_distribution_version == "10"
-
-- name: Remove dpkg hold
- dpkg_selections:
- name: "{{ item | split ('=') | first }}"
- selection: install
- when:
- - ansible_pkg_mgr == 'apt'
- changed_when: false
+- name: cri-o | copy binaries
+ copy:
+ src: "{{ local_release_dir }}/cri-o/bin/{{ item }}"
+ dest: "{{ bin_dir }}/{{ item }}"
+ mode: 0755
+ remote_src: true
with_items:
- - "{{ crio_packages }}"
+ - "{{ crio_bin_files }}"
+ notify: restart crio
-- name: Install cri-o packages
- package:
- name: "{{ item }}"
- state: present
- when: not is_ostree
- with_items: "{{ crio_packages }}"
- register: package_install
- until: package_install is succeeded
- retries: 4
- delay: "{{ retry_stagger | d(3) }}"
+- name: cri-o | copy service file
+ copy:
+ src: "{{ local_release_dir }}/cri-o/contrib/crio.service"
+ dest: /etc/systemd/system/crio.service
+ mode: 0755
+ remote_src: true
+ notify: restart crio
-# This is required to ensure any apt upgrade will not break kubernetes
-- name: Tell Debian hosts not to change the cri-o version with apt upgrade
- dpkg_selections:
- name: "{{ item | split ('=') | first }}"
- selection: hold
- when:
- - ansible_pkg_mgr == 'apt'
- changed_when: false
- with_items:
- - "{{ crio_packages }}"
+- name: cri-o | copy default policy
+ copy:
+ src: "{{ local_release_dir }}/cri-o/contrib/policy.json"
+ dest: /etc/containers/policy.json
+ mode: 0755
+ remote_src: true
+ notify: restart crio
-- name: Check if already installed
- stat:
- path: "/bin/crio"
- get_attributes: no
- get_checksum: no
- get_mime: no
- register: need_bootstrap_crio
- when: is_ostree
-
-- name: Install cri-o packages with ostree
- command: "rpm-ostree install {{ crio_packages|join(' ') }}"
- when:
- - is_ostree
- - not need_bootstrap_crio.stat.exists
- become: true
-
-- name: Reboot immediately for updated ostree
- reboot:
- become: true
- when:
- - is_ostree
- - not need_bootstrap_crio.stat.exists
-
-- name: Remove example CNI configs
- file:
- path: "/etc/cni/net.d/{{ item }}"
- state: absent
- loop:
- - 100-crio-bridge.conf
- - 200-loopback.conf
-
-- name: Copy mounts.conf
+- name: cri-o | copy mounts.conf
copy:
src: mounts.conf
dest: /etc/containers/mounts.conf
@@ -167,15 +107,28 @@
- ansible_os_family == 'RedHat'
notify: restart crio
-- name: Create directory for oci hooks
+- name: cri-o | create directory for oci hooks
file:
path: /etc/containers/oci/hooks.d
state: directory
owner: root
mode: 0755
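+# explicitly select the overlay storage driver and default graphroot for containers/storage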
+- name: cri-o | set overlay driver
+ ini_file:
+ dest: /etc/containers/storage.conf
+ section: storage
+ option: "{{ item.option }}"
+ value: "{{ item.value }}"
+ mode: 0644
+ with_items:
+ - option: driver
+ value: '"overlay"'
+ - option: graphroot
+ value: '"/var/lib/containers/storage"'
+
# metacopy=on is available since 4.19 and was backported to RHEL 4.18 kernel
-- name: Set metacopy mount options correctly
+- name: cri-o | set metacopy mount options correctly
ini_file:
dest: /etc/containers/storage.conf
section: storage.options.overlay
@@ -183,14 +136,14 @@
value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}'
mode: 0644
-- name: Create directory registries configs
+- name: cri-o | create directory registries configs
file:
path: /etc/containers/registries.conf.d
state: directory
owner: root
mode: 0755
-- name: Write registries configs
+- name: cri-o | write registries configs
template:
src: registry.conf.j2
dest: "/etc/containers/registries.conf.d/10-{{ item.prefix | default(item.location) | regex_replace(':', '_') }}.conf"
@@ -198,14 +151,14 @@
loop: "{{ crio_registries }}"
notify: restart crio
-- name: Configure unqualified registry settings
+- name: cri-o | configure unqualified registry settings
template:
src: unqualified.conf.j2
dest: "/etc/containers/registries.conf.d/01-unqualified.conf"
mode: 0644
notify: restart crio
-- name: Write cri-o proxy drop-in
+- name: cri-o | write cri-o proxy drop-in
template:
src: http-proxy.conf.j2
dest: /etc/systemd/system/crio.service.d/http-proxy.conf
@@ -213,7 +166,7 @@
notify: restart crio
when: http_proxy is defined or https_proxy is defined
-- name: Configure the uid/gid space for user namespaces
+- name: cri-o | configure the uid/gid space for user namespaces
lineinfile:
path: '{{ item.path }}'
line: '{{ item.entry }}'
@@ -227,7 +180,7 @@
loop_control:
label: '{{ item.path }}'
-- name: Ensure crio service is started and enabled
+- name: cri-o | ensure crio service is started and enabled
service:
name: crio
daemon_reload: true
@@ -235,18 +188,17 @@
state: started
register: service_start
-- name: Trigger service restart only when needed
+- name: cri-o | trigger service restart only when needed
service: # noqa 503
name: crio
state: restarted
when:
- config_install.changed
- reg_auth_install.changed
- - not package_install.changed
- not service_start.changed
-- name: Verify that crio is running
- command: "crio-status info"
+- name: cri-o | verify that crio is running
+ command: "{{ bin_dir }}/crio-status info"
register: get_crio_info
until: get_crio_info is succeeded
changed_when: false
diff --git a/roles/container-engine/cri-o/tasks/setup-amazon.yaml b/roles/container-engine/cri-o/tasks/setup-amazon.yaml
new file mode 100644
index 000000000..369036725
--- /dev/null
+++ b/roles/container-engine/cri-o/tasks/setup-amazon.yaml
@@ -0,0 +1,38 @@
+---
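+# Amazon Linux 2 enables docker through the amzn2extra-docker repo; disable it
+# and add a copr repo that provides the container-selinux dependency.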
+- name: Check that amzn2-extras.repo exists
+ stat:
+ path: /etc/yum.repos.d/amzn2-extras.repo
+ register: amzn2_extras_file_stat
+
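+# lineinfile in check_mode reports "changed" only when the line is absent, so
+# "not changed" below means the [amzn2extra-docker] section already exists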
+- name: Find docker repo in amzn2-extras.repo file
+ lineinfile:
+ dest: /etc/yum.repos.d/amzn2-extras.repo
+ line: "[amzn2extra-docker]"
+ check_mode: yes
+ register: amzn2_extras_docker_repo
+ when:
+ - amzn2_extras_file_stat.stat.exists
+
+- name: Remove docker repository
+ ini_file:
+ dest: /etc/yum.repos.d/amzn2-extras.repo
+ section: amzn2extra-docker
+ option: enabled
+ value: "0"
+ backup: yes
+ mode: 0644
+ when:
+ - amzn2_extras_file_stat.stat.exists
+ - not amzn2_extras_docker_repo.changed
+
+- name: Add container-selinux yum repo
+ yum_repository:
+ name: copr:copr.fedorainfracloud.org:lsm5:container-selinux
+ file: _copr_lsm5-container-selinux.repo
+ description: Copr repo for container-selinux owned by lsm5
+ baseurl: https://download.copr.fedorainfracloud.org/results/lsm5/container-selinux/epel-7-$basearch/
+ gpgcheck: yes
+ gpgkey: https://download.copr.fedorainfracloud.org/results/lsm5/container-selinux/pubkey.gpg
+ skip_if_unavailable: yes
+ enabled: yes
+ repo_gpgcheck: no
diff --git a/roles/container-engine/cri-o/vars/almalinux-8.yml b/roles/container-engine/cri-o/vars/almalinux-8.yml
deleted file mode 120000
index 039ea3828..000000000
--- a/roles/container-engine/cri-o/vars/almalinux-8.yml
+++ /dev/null
@@ -1 +0,0 @@
-centos-8.yml
\ No newline at end of file
diff --git a/roles/container-engine/cri-o/vars/amazon.yml b/roles/container-engine/cri-o/vars/amazon.yml
deleted file mode 100644
index e4668b333..000000000
--- a/roles/container-engine/cri-o/vars/amazon.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-
-crio_storage_driver: "overlay"
-
-crio_versioned_pkg:
- "1.24":
- - "cri-o-1.24.*"
- "1.23":
- - "cri-o-1.23.*"
- "1.22":
- - "cri-o-1.22.*"
-
-default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}"
-
-crio_packages: "{{ centos_crio_packages | default(default_crio_packages) }}"
diff --git a/roles/container-engine/cri-o/vars/centos-7.yml b/roles/container-engine/cri-o/vars/centos-7.yml
deleted file mode 100644
index c6556fbfe..000000000
--- a/roles/container-engine/cri-o/vars/centos-7.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-crio_versioned_pkg:
- "1.24":
- - "cri-o-1.24.*"
- "1.23":
- - "cri-o-1.23.*"
- "1.22":
- - "cri-o-1.22.*"
-
-default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}"
-
-crio_packages: "{{ centos_crio_packages | default(default_crio_packages) }}"
diff --git a/roles/container-engine/cri-o/vars/centos-8.yml b/roles/container-engine/cri-o/vars/centos-8.yml
deleted file mode 100644
index c6556fbfe..000000000
--- a/roles/container-engine/cri-o/vars/centos-8.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-crio_versioned_pkg:
- "1.24":
- - "cri-o-1.24.*"
- "1.23":
- - "cri-o-1.23.*"
- "1.22":
- - "cri-o-1.22.*"
-
-default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}"
-
-crio_packages: "{{ centos_crio_packages | default(default_crio_packages) }}"
diff --git a/roles/container-engine/cri-o/vars/clearlinux.yml b/roles/container-engine/cri-o/vars/clearlinux.yml
deleted file mode 100644
index e150b84a6..000000000
--- a/roles/container-engine/cri-o/vars/clearlinux.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-crio_packages:
- - containers-basic
-
-crio_conmon: /usr/libexec/crio/conmon
-crio_seccomp_profile: /usr/share/defaults/crio/seccomp.json
diff --git a/roles/container-engine/cri-o/vars/debian.yml b/roles/container-engine/cri-o/vars/debian.yml
deleted file mode 100644
index d7b5209f6..000000000
--- a/roles/container-engine/cri-o/vars/debian.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-crio_versioned_pkg:
- "1.24":
- - "cri-o=1.24*"
- - cri-o-runc
- "1.23":
- - "cri-o=1.23*"
- - cri-o-runc
- "1.22":
- - "cri-o=1.22*"
- - cri-o-runc
-
-crio_debian_buster_backports_packages:
- - "libseccomp2"
-
-default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}"
-
-crio_packages: "{{ debian_crio_packages | default(default_crio_packages) }}"
-
-# The crio_runtimes variable defines a list of OCI compatible runtimes.
-crio_runtimes:
- - name: runc
- path: /usr/sbin/runc
- type: oci
- root: /run/runc
diff --git a/roles/container-engine/cri-o/vars/fedora-36.yml b/roles/container-engine/cri-o/vars/fedora-36.yml
deleted file mode 100644
index 53d669256..000000000
--- a/roles/container-engine/cri-o/vars/fedora-36.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-crio_packages:
- - cri-o
-
-crio_version: 1.24
diff --git a/roles/container-engine/cri-o/vars/fedora.yml b/roles/container-engine/cri-o/vars/fedora.yml
deleted file mode 100644
index 9ba130b98..000000000
--- a/roles/container-engine/cri-o/vars/fedora.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-crio_packages:
- - cri-o
-
-crio_kubernetes_version_matrix:
- "1.24": "1.23"
- "1.23": "1.23"
- "1.22": "1.22"
-crio_version: "{{ crio_kubernetes_version_matrix[crio_required_version] | default('1.23') }}"
diff --git a/roles/container-engine/cri-o/vars/oraclelinux-8.yml b/roles/container-engine/cri-o/vars/oraclelinux-8.yml
deleted file mode 120000
index 039ea3828..000000000
--- a/roles/container-engine/cri-o/vars/oraclelinux-8.yml
+++ /dev/null
@@ -1 +0,0 @@
-centos-8.yml
\ No newline at end of file
diff --git a/roles/container-engine/cri-o/vars/redhat.yml b/roles/container-engine/cri-o/vars/redhat.yml
deleted file mode 100644
index c20c9ba8f..000000000
--- a/roles/container-engine/cri-o/vars/redhat.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-crio_packages:
- - cri-o
- - oci-systemd-hook
diff --git a/roles/container-engine/cri-o/vars/rocky-8.yml b/roles/container-engine/cri-o/vars/rocky-8.yml
deleted file mode 120000
index 039ea3828..000000000
--- a/roles/container-engine/cri-o/vars/rocky-8.yml
+++ /dev/null
@@ -1 +0,0 @@
-centos-8.yml
\ No newline at end of file
diff --git a/roles/container-engine/cri-o/vars/ubuntu.yml b/roles/container-engine/cri-o/vars/ubuntu.yml
deleted file mode 100644
index 632c379b5..000000000
--- a/roles/container-engine/cri-o/vars/ubuntu.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-crio_versioned_pkg:
- "1.24":
- - "cri-o=1.24*"
- - cri-o-runc
- "1.23":
- - "cri-o=1.23*"
- - cri-o-runc
- "1.22":
- - "cri-o=1.22*"
- - cri-o-runc
-
-default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}"
-
-crio_packages: "{{ ubuntu_crio_packages | default(default_crio_packages) }}"
-
-# The crio_runtimes variable defines a list of OCI compatible runtimes.
-crio_runtimes:
- - name: runc
- path: /usr/sbin/runc
- type: oci
- root: /run/runc
diff --git a/roles/container-engine/docker/vars/debian.yml b/roles/container-engine/docker/vars/debian.yml
index 93bd955a6..d46bfa8b8 100644
--- a/roles/container-engine/docker/vars/debian.yml
+++ b/roles/container-engine/docker/vars/debian.yml
@@ -18,17 +18,17 @@ docker_versioned_pkg:
'latest': docker-ce
'18.09': docker-ce=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }}
'19.03': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }}
- '20.10': docker-ce=5:20.10.17~3-0~debian-{{ ansible_distribution_release|lower }}
- 'stable': docker-ce=5:20.10.17~3-0~debian-{{ ansible_distribution_release|lower }}
- 'edge': docker-ce=5:20.10.17~3-0~debian-{{ ansible_distribution_release|lower }}
+ '20.10': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
+ 'stable': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
+ 'edge': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
docker_cli_versioned_pkg:
'latest': docker-ce-cli
'18.09': docker-ce-cli=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }}
'19.03': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }}
- '20.10': docker-ce-cli=5:20.10.17~3-0~debian-{{ ansible_distribution_release|lower }}
- 'stable': docker-ce-cli=5:20.10.17~3-0~debian-{{ ansible_distribution_release|lower }}
- 'edge': docker-ce-cli=5:20.10.17~3-0~debian-{{ ansible_distribution_release|lower }}
+ '20.10': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
+ 'stable': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
+ 'edge': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
docker_package_info:
pkgs:
diff --git a/roles/container-engine/docker/vars/fedora.yml b/roles/container-engine/docker/vars/fedora.yml
index 0fcac50fa..8972fd891 100644
--- a/roles/container-engine/docker/vars/fedora.yml
+++ b/roles/container-engine/docker/vars/fedora.yml
@@ -18,16 +18,16 @@ containerd_versioned_pkg:
docker_versioned_pkg:
'latest': docker-ce
'19.03': docker-ce-19.03.15-3.fc{{ ansible_distribution_major_version }}
- '20.10': docker-ce-20.10.17-3.fc{{ ansible_distribution_major_version }}
- 'stable': docker-ce-20.10.17-3.fc{{ ansible_distribution_major_version }}
- 'edge': docker-ce-20.10.17-3.fc{{ ansible_distribution_major_version }}
+ '20.10': docker-ce-20.10.20-3.fc{{ ansible_distribution_major_version }}
+ 'stable': docker-ce-20.10.20-3.fc{{ ansible_distribution_major_version }}
+ 'edge': docker-ce-20.10.20-3.fc{{ ansible_distribution_major_version }}
docker_cli_versioned_pkg:
'latest': docker-ce-cli
'19.03': docker-ce-cli-19.03.15-3.fc{{ ansible_distribution_major_version }}
- '20.10': docker-ce-cli-20.10.17-3.fc{{ ansible_distribution_major_version }}
- 'stable': docker-ce-cli-20.10.17-3.fc{{ ansible_distribution_major_version }}
- 'edge': docker-ce-cli-20.10.17-3.fc{{ ansible_distribution_major_version }}
+ '20.10': docker-ce-cli-20.10.20-3.fc{{ ansible_distribution_major_version }}
+ 'stable': docker-ce-cli-20.10.20-3.fc{{ ansible_distribution_major_version }}
+ 'edge': docker-ce-cli-20.10.20-3.fc{{ ansible_distribution_major_version }}
docker_package_info:
enablerepo: "docker-ce"
diff --git a/roles/container-engine/docker/vars/redhat-7.yml b/roles/container-engine/docker/vars/redhat-7.yml
index 7fd3d4e44..e37c41621 100644
--- a/roles/container-engine/docker/vars/redhat-7.yml
+++ b/roles/container-engine/docker/vars/redhat-7.yml
@@ -20,17 +20,17 @@ docker_versioned_pkg:
'latest': docker-ce
'18.09': docker-ce-18.09.9-3.el7
'19.03': docker-ce-19.03.15-3.el7
- '20.10': docker-ce-20.10.17-3.el7
- 'stable': docker-ce-20.10.17-3.el7
- 'edge': docker-ce-20.10.17-3.el7
+ '20.10': docker-ce-20.10.20-3.el7
+ 'stable': docker-ce-20.10.20-3.el7
+ 'edge': docker-ce-20.10.20-3.el7
docker_cli_versioned_pkg:
'latest': docker-ce-cli
'18.09': docker-ce-cli-18.09.9-3.el7
'19.03': docker-ce-cli-19.03.15-3.el7
- '20.10': docker-ce-cli-20.10.17-3.el7
- 'stable': docker-ce-cli-20.10.17-3.el7
- 'edge': docker-ce-cli-20.10.17-3.el7
+ '20.10': docker-ce-cli-20.10.20-3.el7
+ 'stable': docker-ce-cli-20.10.20-3.el7
+ 'edge': docker-ce-cli-20.10.20-3.el7
docker_package_info:
enablerepo: "docker-ce"
diff --git a/roles/container-engine/docker/vars/redhat.yml b/roles/container-engine/docker/vars/redhat.yml
index 69fb54ed1..836763faa 100644
--- a/roles/container-engine/docker/vars/redhat.yml
+++ b/roles/container-engine/docker/vars/redhat.yml
@@ -20,17 +20,17 @@ docker_versioned_pkg:
'latest': docker-ce
'18.09': docker-ce-3:18.09.9-3.el7
'19.03': docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }}
- '20.10': docker-ce-3:20.10.17-3.el{{ ansible_distribution_major_version }}
- 'stable': docker-ce-3:20.10.17-3.el{{ ansible_distribution_major_version }}
- 'edge': docker-ce-3:20.10.17-3.el{{ ansible_distribution_major_version }}
+ '20.10': docker-ce-3:20.10.20-3.el{{ ansible_distribution_major_version }}
+ 'stable': docker-ce-3:20.10.20-3.el{{ ansible_distribution_major_version }}
+ 'edge': docker-ce-3:20.10.20-3.el{{ ansible_distribution_major_version }}
docker_cli_versioned_pkg:
'latest': docker-ce-cli
'18.09': docker-ce-cli-1:18.09.9-3.el7
'19.03': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }}
- '20.10': docker-ce-cli-1:20.10.17-3.el{{ ansible_distribution_major_version }}
- 'stable': docker-ce-cli-1:20.10.17-3.el{{ ansible_distribution_major_version }}
- 'edge': docker-ce-cli-1:20.10.17-3.el{{ ansible_distribution_major_version }}
+ '20.10': docker-ce-cli-1:20.10.20-3.el{{ ansible_distribution_major_version }}
+ 'stable': docker-ce-cli-1:20.10.20-3.el{{ ansible_distribution_major_version }}
+ 'edge': docker-ce-cli-1:20.10.20-3.el{{ ansible_distribution_major_version }}
docker_package_info:
enablerepo: "docker-ce"
diff --git a/roles/container-engine/docker/vars/ubuntu.yml b/roles/container-engine/docker/vars/ubuntu.yml
index ea5cc5e2a..cced07e11 100644
--- a/roles/container-engine/docker/vars/ubuntu.yml
+++ b/roles/container-engine/docker/vars/ubuntu.yml
@@ -18,17 +18,17 @@ docker_versioned_pkg:
'latest': docker-ce
'18.09': docker-ce=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'19.03': docker-ce=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }}
- '20.10': docker-ce=5:20.10.17~3-0~ubuntu-{{ ansible_distribution_release|lower }}
- 'stable': docker-ce=5:20.10.17~3-0~ubuntu-{{ ansible_distribution_release|lower }}
- 'edge': docker-ce=5:20.10.17~3-0~ubuntu-{{ ansible_distribution_release|lower }}
+ '20.10': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
+ 'stable': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
+ 'edge': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
docker_cli_versioned_pkg:
'latest': docker-ce-cli
'18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'19.03': docker-ce-cli=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }}
- '20.10': docker-ce-cli=5:20.10.17~3-0~ubuntu-{{ ansible_distribution_release|lower }}
- 'stable': docker-ce-cli=5:20.10.17~3-0~ubuntu-{{ ansible_distribution_release|lower }}
- 'edge': docker-ce-cli=5:20.10.17~3-0~ubuntu-{{ ansible_distribution_release|lower }}
+ '20.10': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
+ 'stable': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
+ 'edge': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
docker_package_info:
pkgs:
diff --git a/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2 b/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2
index f64647bdf..40382423d 100644
--- a/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2
+++ b/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2
@@ -459,7 +459,7 @@ enable_debug = {{ kata_containers_qemu_debug }}
#
# If enabled, the default trace mode is "dynamic" and the
# default trace type is "isolated". The trace mode and type are set
-# explicity with the `trace_type=` and `trace_mode=` options.
+# explicitly with the `trace_type=` and `trace_mode=` options.
#
# Notes:
#
diff --git a/roles/container-engine/runc/tasks/main.yml b/roles/container-engine/runc/tasks/main.yml
index 94f97c0ba..7a8e336c2 100644
--- a/roles/container-engine/runc/tasks/main.yml
+++ b/roles/container-engine/runc/tasks/main.yml
@@ -1,4 +1,12 @@
---
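+# /run/ostree-booted exists on rpm-ostree based systems such as Fedora CoreOS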
+- name: runc | check if fedora coreos
+ stat:
+ path: /run/ostree-booted
+ get_attributes: no
+ get_checksum: no
+ get_mime: no
+ register: ostree
+
- name: runc | set is_ostree
set_fact:
is_ostree: "{{ ostree.stat.exists }}"
diff --git a/roles/container-engine/skopeo/tasks/main.yml b/roles/container-engine/skopeo/tasks/main.yml
new file mode 100644
index 000000000..033ae629f
--- /dev/null
+++ b/roles/container-engine/skopeo/tasks/main.yml
@@ -0,0 +1,32 @@
+---
+- name: skopeo | check if fedora coreos
+ stat:
+ path: /run/ostree-booted
+ get_attributes: no
+ get_checksum: no
+ get_mime: no
+ register: ostree
+
+- name: skopeo | set is_ostree
+ set_fact:
+ is_ostree: "{{ ostree.stat.exists }}"
+
+- name: skopeo | Uninstall skopeo installed by the system package manager
+ package:
+ name: skopeo
+ state: absent
+ when:
+ - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
+ ignore_errors: true # noqa ignore-errors
+
+- name: skopeo | Download skopeo binary
+ include_tasks: "../../../download/tasks/download_file.yml"
+ vars:
+ download: "{{ download_defaults | combine(downloads.skopeo) }}"
+
+- name: skopeo | Copy skopeo binary from download dir
+ copy:
+ src: "{{ downloads.skopeo.dest }}"
+ dest: "{{ bin_dir }}/skopeo"
+ mode: 0755
+ remote_src: true
diff --git a/roles/container-engine/validate-container-engine/tasks/main.yml b/roles/container-engine/validate-container-engine/tasks/main.yml
index d5b28c29c..fdd60e0e2 100644
--- a/roles/container-engine/validate-container-engine/tasks/main.yml
+++ b/roles/container-engine/validate-container-engine/tasks/main.yml
@@ -90,6 +90,7 @@
import_role:
name: container-engine/containerd
tasks_from: reset
+ handlers_from: reset
vars:
service_name: containerd.service
when:
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index a4d2baea7..871c42f64 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -110,35 +110,45 @@ calico_apiserver_version: "{{ calico_version }}"
typha_enabled: false
calico_apiserver_enabled: false
-flannel_version: "v0.18.1"
+flannel_version: "v0.19.2"
flannel_cni_version: "v1.1.0"
cni_version: "v1.1.1"
weave_version: 2.8.1
-pod_infra_version: "3.6"
-cilium_version: "v1.11.7"
+pod_infra_version: "3.7"
+
+cilium_version: "v1.12.1"
+cilium_enable_hubble: false
+
kube_ovn_version: "v1.9.7"
kube_ovn_dpdk_version: "19.11-{{ kube_ovn_version }}"
kube_router_version: "v1.5.1"
multus_version: "v3.8-{{ image_arch }}"
-helm_version: "v3.9.2"
+helm_version: "v3.9.4"
nerdctl_version: "0.22.2"
krew_version: "v0.4.3"
+skopeo_version: v1.10.0
# Get kubernetes major version (i.e. 1.17.4 => 1.17)
kube_major_version: "{{ kube_version | regex_replace('^v([0-9])+\\.([0-9]+)\\.[0-9]+', 'v\\1.\\2') }}"
etcd_supported_versions:
+ v1.25: "v3.5.5"
v1.24: "v3.5.4"
v1.23: "v3.5.3"
- v1.22: "v3.5.3"
etcd_version: "{{ etcd_supported_versions[kube_major_version] }}"
crictl_supported_versions:
+ v1.25: "v1.25.0"
v1.24: "v1.24.0"
v1.23: "v1.23.0"
- v1.22: "v1.22.0"
crictl_version: "{{ crictl_supported_versions[kube_major_version] }}"
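+# cri-o minor versions track the Kubernetes minor version, hence the 1:1 mapping below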
+crio_supported_versions:
+ v1.25: v1.25.1
+ v1.24: v1.24.3
+ v1.23: v1.23.2
+crio_version: "{{ crio_supported_versions[kube_major_version] }}"
+
# Download URLs
kubelet_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"
kubectl_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
@@ -149,6 +159,7 @@ calicoctl_download_url: "https://github.com/projectcalico/calico/releases/downlo
calicoctl_alternate_download_url: "https://github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
calico_crds_download_url: "https://github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz"
crictl_download_url: "https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
+crio_download_url: "https://storage.googleapis.com/cri-o/artifacts/cri-o.{{ image_arch }}.{{ crio_version }}.tar.gz"
helm_download_url: "https://get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"
runc_download_url: "https://github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}"
crun_download_url: "https://github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"
@@ -161,34 +172,63 @@ nerdctl_download_url: "https://github.com/containerd/nerdctl/releases/download/v
krew_download_url: "https://github.com/kubernetes-sigs/krew/releases/download/{{ krew_version }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz"
containerd_download_url: "https://github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
cri_dockerd_download_url: "https://github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz"
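+# note: skopeo upstream ships no prebuilt release binaries; these are community builds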
+skopeo_download_url: "https://github.com/lework/skopeo-binary/releases/download/{{ skopeo_version }}/skopeo-linux-{{ image_arch }}"
crictl_checksums:
arm:
+ v1.25.0: c4efe3649af5542f2b07cdfc0be62e9e13c7bb846a9b59d57e190c764f28dae4
v1.24.0: 1ab8a88d6ce1e9cff1c76fc454d2d41cf0c89e98c6db15a41804a3a5874cbf89
v1.23.0: c20f7a118183d1e6da24c3709471ea0b4dee51cb709f958e0d90f3acb4eb59ae
- v1.22.0: b74f7cc52ce79c6d7fd776beb6353f4628e9c36f17ba2b8e6c48155714057f07
arm64:
+ v1.25.0: 651c939eca010bbf48cc3932516b194028af0893025f9e366127f5b50ad5c4f4
v1.24.0: b6fe172738dfa68ca4c71ade53574e859bf61a3e34d21b305587b1ad4ab28d24
v1.23.0: 91094253e77094435027998a99b9b6a67b0baad3327975365f7715a1a3bd9595
- v1.22.0: a713c37fade0d96a989bc15ebe906e08ef5c8fe5e107c2161b0665e9963b770e
amd64:
+ v1.25.0: 86ab210c007f521ac4cdcbcf0ae3fb2e10923e65f16de83e0e1db191a07f0235
v1.24.0: 3df4a4306e0554aea4fdc26ecef9eea29a58c8460bebfaca3405799787609880
v1.23.0: b754f83c80acdc75f93aba191ff269da6be45d0fc2d3f4079704e7d1424f1ca8
- v1.22.0: 45e0556c42616af60ebe93bf4691056338b3ea0001c0201a6a8ff8b1dbc0652a
ppc64le:
+ v1.25.0: 1b77d1f198c67b2015104eee6fe7690465b8efa4675ea6b4b958c63d60a487e7
v1.24.0: 586c263678c6d8d543976607ea1732115e622d44993e2bcbed29832370d3a754
v1.23.0: 53db9e605a3042ea77bbf42a01a4e248dea8839bcab544c491745874f73aeee7
- v1.22.0: c78bcea20c8f8ca3be0762cca7349fd2f1df520c304d0b2ef5e8fa514f64e45f
+
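+# a checksum of 0 is a dummy placeholder for architectures without an upstream
+# cri-o archive (same convention as the etcd arm entries further down)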
+crio_archive_checksums:
+ arm:
+ v1.25.1: 0
+ v1.24.3: 0
+ v1.23.2: 0
+ arm64:
+ v1.25.1: add26675dc993b292024d007fd69980d8d1e75c675851d0cb687fe1dfd1f3008
+ v1.24.3: d8040602e03c90e4482b4ce97b63c2cf1301cd2afb0aa722342f40f3537a1a1f
+ v1.23.2: a866ccc3a062ac29906a619b9045a5e23b11fa9249f8802f8be0849491d01fbd
+ amd64:
+ v1.25.1: 49f98a38805740c40266a5bf3badc28e4ca725ccf923327c75c00fccc241f562
+ v1.24.3: 43f6e3a7ad6ae8cf05ed0f1e493578c28abf6a798aedb8ee9643ff7c25a68ca3
+ v1.23.2: 5c766dbf366a80f8b5dbc7a06d566f43e7cb0675186c50062df01f3b3cb5e526
+ ppc64le:
+ v1.25.1: 0
+ v1.24.3: 0
+ v1.23.2: 0
# Checksum
# Kubernetes versions above Kubespray's current target version are untested and should be used with caution.
kubelet_checksums:
arm:
+ v1.25.3: 9745a48340ca61b00f0094e4b8ff210839edcf05420f0d57b3cb1748cb887060
+ v1.25.2: 995f885543fa61a08bd4f1008ba6d7417a1c45bd2a8e0f70c67a83e53b46eea5
+ v1.25.1: 6fe430ad91e1ed50cf5cc396aa204fda0889c36b8a3b84619d633cd9a6a146e2
+ v1.25.0: ad45ac3216aa186648fd034dec30a00c1a2d2d1187cab8aae21aa441a13b4faa
+ v1.24.7: 3841e80f54ee5576928e799e4962231261bcdafe94868a310a8782da9a321da5
+ v1.24.6: 084e469d1d3b60363e5e20812ee0d909daa5496f3e6ebd305d1f23d1fe0709d4
+ v1.24.5: ce55155d1aff0c72effee19c6bef534c2b7d1b23ec701d70335d181bd2d12a87
v1.24.4: f9d387c18159a4473e7bdc290780ba1b1c92e8d8b41f558c15ee044db54636cd
v1.24.3: fe34b1a0892cdfb015f66be8f2d3450130a5d04f9466732020e186c8da0ee799
v1.24.2: e484fb000dcfdcf7baca79451745e29764747a27d36f3fc1dda5815b9cbc9b22
v1.24.1: 393d130a1715205a253b2f70dbd1f00d1a52ab89b4f3684ed116a937e68116ec
v1.24.0: fd19ff957c73e5397f9af931c82bdb95791e47dc7d3135d38720ecda211758a3
+ v1.23.13: 58f744247dbc8bca50b01ec1c25b0b5868736319f9cc8bf964fc2c1dd9eef0f9
+ v1.23.12: 5b7c38206ba3c04cd756062b74093548ac6309dc086c2893351b1c479f5415a3
+ v1.23.11: 93bbe3a130dcd7d5732e8b949f13ba8728bb37d3d4bd58408f99352cf484f9d0
v1.23.10: d6d5aa26f16e735962cac5f2ee8ddc0d3b9d2aa14b8e968cb55fc9745f9a8b03
v1.23.9: f22edc9838eb3d0788d951c1fc8fdb0e1bf6c43ad638a215172f25b54ca27a8a
v1.23.8: 53c4f44ba10d9c53a4526fccb4d20146e52473788058684ca2de74ae0e1abb11
@@ -200,26 +240,22 @@ kubelet_checksums:
v1.23.2: f9e83b3bd99b9e70cd98a5f8dc75a89d3d51548d51e4e05615cdc48d6144f908
v1.23.1: 29868f172ef171ae990deafcdc13af7fe5b00f0a546ae81c267c4ad01231c3ce
v1.23.0: 7417fc7cd624a85887f0a28054f58f7534143579fe85285d0b68c8984c95f2ba
- v1.22.13: c2230f8ff03102502b6f9f10dcc494af6c536fd8f1f9467aa42ba684da4e9106
- v1.22.12: bb50b896769cb5e53101ef36e580095b8e546ea0dc194687e662824248b183ac
- v1.22.11: 528e01a436b1b91edaa192ecc6befff5f5a2e17f9f340e3f4908b8bed1cebbe9
- v1.22.10: 1510b508bd72c03f2576f07e652dfc0a12feda5a231a7dd792f32cd968153d8f
- v1.22.9: 99eb1607e30d855b14da6f4f21d00d09dc6477c3e3bc1e88d00dea7961f3a488
- v1.22.8: 7bc14bfca0efb5af6d7e56218f5c51862596cd9927843f8456a36e70e8e64da8
- v1.22.7: 3709a794b33081b3f5f5ff1c6f9ab1614c3723d1da0a31c74c37ccdec456e94f
- v1.22.6: 9957157375a343664db35be75281d610df85e1986a98cc3db1398bd0e53c36f4
- v1.22.5: d901629aa537a0bff0907557810adb3cdc4a31f58035c57a45be011d836e2c8f
- v1.22.4: 725a89d9752bbce91443b20108f784ae5efc950db26abb618eb4d0a2364b2ada
- v1.22.3: 1c08dbe0b90d5b03fa386fadf5fa1af4db8e41bf8fa77888d54119ff188d130d
- v1.22.2: 941e639b0f859eba65df0c66be82808ea6be697ed5dbf4df8e602dcbfa683aa3
- v1.22.1: f42bc00f274be7ce0578b359cbccc48ead03894b599f5bf4d10e44c305fbab65
- v1.22.0: 4354dc8db1d8ca336eb940dd73adcd3cf17cbdefbf11889602420f6ee9c6c4bb
arm64:
+ v1.25.3: 929d25fc3f901749b058141a9c624ff379759869e09df49b75657c0be3141091
+ v1.25.2: c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae
+ v1.25.1: b6baa99b99ecc1f358660208a9a27b64c65f3314ff95a84c73091b51ac98484b
+ v1.25.0: 69572a7b3d179d4a479aa2e0f90e2f091d8d84ef33a35422fc89975dc137a590
+ v1.24.7: d8bd38e595ca061c53d3b7d1daebe5b3cc1ad44c731666bd5e842d336077db4b
+ v1.24.6: 2a7b8e131d6823462e38bc1514b5dea5dca86254b3a12ed4a0fa653c2e06dd0e
+ v1.24.5: dd5dcea80828979981654ec0732b197be252a3259a527cbc299d9575bc2de3e8
v1.24.4: 2d9817c1e9e1edd9480aa05862ea6e9655a9512d820b1933175f5d7c8253ca61
v1.24.3: 6c04ae25ee9b434f40e0d2466eb4ef5604dc43f306ddf1e5f165fc9d3c521e12
v1.24.2: 40a8460e104fbf97abee9763f6e1f2143debc46cc6c9a1a18e21c1ff9960d8c0
v1.24.1: c2189c6956afda0f6002839f9f14a9b48c89dcc0228701e84856be36a3aac6bf
v1.24.0: 8f066c9a048dd1704bf22ccf6e994e2fa2ea1175c9768a786f6cb6608765025e
+ v1.23.13: 4e2297c9893d425bfcd80741b95fb1a5b59b4fd4f4bcf782ccab94760e653cdf
+ v1.23.12: b802f12c79a9797f83a366c617144d019d2994fc724c75f642a9d031ce6a3488
+ v1.23.11: ce4f568c3193e8e0895062f783980da89adb6b54a399c797656a3ce172ddb2fc
v1.23.10: 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8
v1.23.9: c11b14ab3fa8e567c54e893c5a937f53618b26c9b62416cc8aa7760835f68350
v1.23.8: 1b4ec707e29e8136e3516a437cb541a79c52c69b1331a7add2b47e7ac7d032e6
@@ -231,26 +267,22 @@ kubelet_checksums:
v1.23.2: 65372ad077a660dfb8a863432c8a22cd0b650122ca98ce2e11f51a536449339f
v1.23.1: c24e4ab211507a39141d227595610383f7c5686cae3795b7d75eebbce8606f3d
v1.23.0: a546fb7ccce69c4163e4a0b19a31f30ea039b4e4560c23fd6e3016e2b2dfd0d9
- v1.22.13: f8c1ec9fec6b36646ac05e1e26f0cd3e20395b500eca8ee3baeb3ca59935fdb0
- v1.22.12: 0e58133c153be32e8e61004cfdc18f8a02ef465f979c6d5bf3e998fbe3f89fca
- v1.22.11: d20398fa95ee724d63c3263af65eeb49e56c963fcace92efed2d2d0f6084c11a
- v1.22.10: 2376a7ecc044bc4b5cdae9a0a14d058ae5c1803450f3a8ffdce656785e9e251e
- v1.22.9: d7a692ee4f5f5929a15c61947ae2deecb71b0945461f6064ced83d13094028e8
- v1.22.8: 604c672908a3b3cbbcf9d109d8d5fef0879992ddcf0d3e0766079d3bb7d0ca3e
- v1.22.7: 8291d304c0ba4faec4336336d4cdd5159f5c90652b8b0d6be0cb5ce8f8bf92e3
- v1.22.6: fbb823fe82b16c6f37911e907d3e4921f4642d5d48eb60e56aba1d7be0665430
- v1.22.5: e68536cff9172d1562edddd7194d20302472a064009bf7c0ed8d79d030cb61aa
- v1.22.4: c0049ab240b27a9dd57be2bb98356c62582d975ba2f790a61b34f155b12ab7e6
- v1.22.3: d0570f09bd5137ff2f672a0b177a6b78fd294a42db21f094dc02c613436ce8d1
- v1.22.2: f5fe3d6f4b2df5a794ebf325dc17fcdfe905a188e25f7c7e47d9cd15f14f8c2d
- v1.22.1: d5ffd67d8285fb224a1c49622fd739131f7b941e3d68f233dec96e72c9ebee63
- v1.22.0: cea637a7da4f1097b16b0195005351c07032a820a3d64c3ff326b9097cfac930
amd64:
+ v1.25.3: d5c89c5e5dae6afa5f06a3e0e653ac3b93fa9a93c775a715531269ec91a54abe
+ v1.25.2: 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3
+ v1.25.1: 63e38bcbc4437ce10227695f8722371ec0d178067f1031d09fe1f59b6fcf214a
+ v1.25.0: 7f9183fce12606818612ce80b6c09757452c4fb50aefea5fc5843951c5020e24
+ v1.24.7: 4d24c97c924c40971412cc497145ad823e4b7b87ccda97ebced375f7e886e9e2
+ v1.24.6: f8b606f542327128e404d2e66a72a40dc2ddb4175fb8e93c55effeacea60921b
+ v1.24.5: 2448debe26e90341b038d7ccfcd55942c76ef3d9db48e42ceae5e8de3fbad631
v1.24.4: 0f34d12aaa1b911adbf75dd63df03d0674dde921fa0571a51acd2b5b576ba0a4
v1.24.3: da575ceb7c44fddbe7d2514c16798f39f8c10e54b5dbef3bcee5ac547637db11
v1.24.2: 13da57d32be1debad3d8923e481f30aaa46bca7030b7e748b099d403b30e5343
v1.24.1: fc352d5c983b0ccf47acd8816eb826d781f408d27263dd8f761dfb63e69abfde
v1.24.0: 3d98ac8b4fb8dc99f9952226f2565951cc366c442656a889facc5b1b2ec2ba52
+ v1.23.13: 4d8f796b82dbe2b89b6d587bfeedf66724526b211c75a53456d4ac4014e3dcca
+ v1.23.12: 98ffa8a736d3e43debb1aa61ae71dea3671989cde5e9e44c6ee51a3d47c63614
+ v1.23.11: b0e6d413f9b4cf1007fcb9f0ea6460ed5273a50c945ae475c224036b0ab817f7
v1.23.10: c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b
v1.23.9: a5975920be1de0768e77ef101e4e42b179406add242c0883a7dc598f2006d387
v1.23.8: 1ba15ad4d9d99cfc3cbef922b5101492ad74e812629837ac2e5705a68cb7af1e
@@ -262,26 +294,22 @@ kubelet_checksums:
v1.23.2: c3c4be17910935d234b776288461baf7a9c6a7414d1f1ac2ef8d3a1af4e41ab6
v1.23.1: 7ff47abf62096a41005d18c6d482cf73f26b613854173327fa9f2b98720804d4
v1.23.0: 4756ff345dd80704b749d87efb8eb294a143a1f4a251ec586197d26ad20ea518
- v1.22.13: f55a72f5546ecf463f54e9220a1c38179b94b32ba561dfd6ec1f2fbe8231d640
- v1.22.12: d54539bd0fa43b43e9ad2ac4e6644bcb3f1e98b8fc371befba7ac362d93a6b00
- v1.22.11: 50fb1ede16c15dfe0bcb9fa98148d969ae8efeb8b599ce5eb5f09ab78345c9d1
- v1.22.10: c1aa6e9f59cfc765d33b382f604140699ab97c9c4212a905d5e1bcd7ef9a5c8b
- v1.22.9: 61530a9e6a5cb1f971295de860a8ade29db65d0dff50d1ffff3de1155dfd0c02
- v1.22.8: 2e6d1774f18c4d4527c3b9197a64ea5705edcf1b547c77b3e683458d771f3ce7
- v1.22.7: cfc96b5f781bfbfdcb05115f4e26a5a6afc9d74bb4a5647c057b2c13086fb24d
- v1.22.6: 7b009835b0ab74aa16ebf57f5179893035e0cf5994e1bcf9b783275921a0393a
- v1.22.5: 2be340f236a25881969eaa7d58b2279a4e31dc393cab289a74c78c0c37ba2154
- v1.22.4: 8d014cfe511d8c0a127b4e65ae2a6e60db592f9b1b512bb822490ea35958b10d
- v1.22.3: 3f00a5f98cec024abace5bcc3580b80afc78181caf52e100fc800e588774d6eb
- v1.22.2: 0fd6572e24e3bebbfd6b2a7cb7adced41dad4a828ef324a83f04b46378a8cb24
- v1.22.1: 2079780ad2ff993affc9b8e1a378bf5ee759bf87fdc446e6a892a0bbd7353683
- v1.22.0: fec5c596f7f815f17f5d7d955e9707df1ef02a2ca5e788b223651f83376feb7f
ppc64le:
+ v1.25.3: 447a8b34646936bede22c93ca85f0a98210c9f61d6963a7d71f7f6a5152af1d1
+ v1.25.2: a45dc00ac3a8074c3e9ec6a45b63c0a654529a657d929f28bd79c550a0d213d7
+ v1.25.1: c1e3373ac088e934635fb13004a21ada39350033bfa0e4b258c114cb86b69138
+ v1.25.0: 8015f88d1364cf77436c157de8a5d3ab87f1cb2dfaa9289b097c92a808845491
+ v1.24.7: 621ce04d0cb1c66065303d062bf9ac248225b8428b1adbca3f6fa6dd2eda13cc
+ v1.24.6: ea9068c28a0107f5e1317ef8ba3a23965d95ee57db6fa71ee27433cdaa0fe33c
+ v1.24.5: 56844b2594212e81d7cd4470f81da5d0f79876f044ee6d1707166fe76fdcb03a
v1.24.4: 38475815448bd5d43e893b6a9ac9fd3ae8b0dbddf8a7ba92d3f83437b5c1b916
v1.24.3: 0bfb73c1932c8593ef6281efc6d16bf440275fed1272466f76101ea0f0971907
v1.24.2: 43e9354dfc46b6d3579a6c9a3e49a2f079fec8e63c3ed998143ab2f05790d132
v1.24.1: c59319571efe34ad9bcc4edfe89f5e324d9026d1c3182d86cadc00cfc77f7a06
v1.24.0: d41d62f6aeff9f8f9b1a1390ed2b17994952966741d6675af8410799bca38931
+ v1.23.13: 444c646dc94dd7f7541a91ddc16a0da7259e345e1f84ec648077f447626844a2
+ v1.23.12: e14a9dd3e3615e781d1de9000b250267eddfbab5ba46432ad2aa9108a5992e6a
+ v1.23.11: 64b02bc0f17b9df2b7ca8006d6cb6c1345f32fe6e748fcb6cbe9c4b406b116f6
v1.23.10: a8f742b9b1c0b1a70719da6ea52e92d276b5ad6c59db0070aacdc474292c7e7a
v1.23.9: 6b05833c938c1d31e7450e93aebff561dfaa43eacafde1a011e0945ec2114fec
v1.23.8: f07b6194add802e2e5c5905a79ef744118ccb82ebcbf4e402a11bdb478de2c0f
@@ -293,27 +321,23 @@ kubelet_checksums:
v1.23.2: 6fdee30ee13149845aac8d110ad6a1894bb35f953e1ecb562ce7c59f63329dca
v1.23.1: 9c3dc8ba6888b610e204d4066f0460d5b24037219300bb5f5b254ea7e8d5a4d1
v1.23.0: 25c841e08ab2655486813287aa97cadf7524277040599e95c32ed9f206308753
- v1.22.13: ac81fe025a69834f872d70d696472780e8e5713e0ca2450dcfc2cd9745b55239
- v1.22.12: 50e418ff8b8d1f4746be37d5658895dfcb892b0a3a8a2dd7320e760d4159826c
- v1.22.11: 48e6b0e8d4483e7ccce02dd658b4c92be6859bbb235c58e8902182503280a14c
- v1.22.10: da53b707bd5e8b4ae9e720a4e87892e4c0713dd419f0d66cade7e4619a3d8965
- v1.22.9: fd5be136a69e011ccb9d4482e4c13f23045e3c9c891e7e87394019f003f5cc79
- v1.22.8: 804c336a31dfce44330e358d6b30dd0056859c3edc2b9bf34672d327fa8a2e23
- v1.22.7: 3bfa04aa3a443aacdc6cf3b0a500317b5efa5cbdf4d9c343026be442120113b4
- v1.22.6: 4e64366b96abaf7b45d14c72f6f84fb51c84a66ea0f25e93e50f986e6af7d29e
- v1.22.5: 130ad083528ffaca317070828a308a5bb13e79309ec7e82bdf2bfa981a696a73
- v1.22.4: 4f71ef4083bff268c4007c2edc59cb89c1deb4e9f6ecd09fbe009644f3c059bd
- v1.22.3: 1d8bcb12d956512c2fb215e9161d4b196684a61836ce528e8bbde933ff36a8b5
- v1.22.2: 9b4e555110f747569393220ef12a54ae26eb4168eefb77d4b1e6c1d123f71438
- v1.22.1: a8c379fce4b1c1bc40238dfea67db286ec8ffec56ed701d581b53a941f7031bb
- v1.22.0: 957dcc6ae45078ce971af183c0061d60168c15f484dcd978588cc6380236423f
kubectl_checksums:
arm:
+ v1.25.3: 59e1dba0951f19d4d18eb04db50fcd437c1d57460f2008bc03e668f71b8ea685
+ v1.25.2: d6b581a41b010ef86a9364102f8612d2ee7fbc7dd2036e40ab7c85adb52331cb
+ v1.25.1: e8c6bfd8797e42501d14c7d75201324630f15436f712c4f7e46ce8c8067d9adc
+ v1.25.0: 0b907cfdcabafae7d2d4ac7de55e3ef814df999acdf6b1bd0ecf6abbef7c7131
+ v1.24.7: 1829c5bb2ef30df6e46f99aa5c87a0f510a809f9169c725b3da08455bcf7f258
+ v1.24.6: 7ca8fd7f5d6262668c20e3e639759e1976590ed4bd4fece62861dd376c2168de
+ v1.24.5: 3ca0fcb90b715f0c13eafe15c9100495a8648d459f1281f3340875d1b0b7e78f
v1.24.4: 060c0bb55aa3284c489cf8224ab10296d486b5a2e7f3e5d6440c9382698bf68a
v1.24.3: 4ae94095580973931da53fd3b823909d85ca05055d6300f392d9dc9e5748d612
v1.24.2: c342216e1d32c28953e13f28ced387feda675b969a196ed69eaeda137fa7486a
v1.24.1: 42e880ff20a55e8ec49187d54e2c1367226d220a0a6a1797e7fbf97426762f4f
v1.24.0: 410fc0b3c718f8f431fe4f7d5820bf8133b16ffb76187a53fa90929a77a38cbc
+ v1.23.13: c32baf45ad141f967b4877c7151aeee1ae296eebdbcb7a5200d418bd77c284b2
+ v1.23.12: 94e946dcd1c2f7c8c9e3e022202762a36dab604b861b50bdcbdfb2c719731bd9
+ v1.23.11: 6eaffb8f64929e888137366cf2aa7fd1df2cf851de4f96f62fe70ed4d79f0ef7
v1.23.10: b2156478b03b90c0f72fd386ceab2e78b7cf32eab9d9b4696c28d2bb45c9d3ec
v1.23.9: 44caabd847c147ded79aa91daa49a5e0ea68ce4a0833b0733df1c8313375ff80
v1.23.8: c4a2be3c61f40d4b1b0f61d509b0e361e85f10b7d2a98120d180c023ede7728f
@@ -325,26 +349,22 @@ kubectl_checksums:
v1.23.2: 6521719af33342f00ebb6cf020848e25152a63ed5f35a94440c08373b7a36173
v1.23.1: 52001ed48e9e1c8b8623f3e6b0242111227721e5ddd08fa18046c65c406e35a5
v1.23.0: 6152216d88fa4d32da58c67f78b63b3b99bf4d4d726ffb9fb74ea698dccc8644
- v1.22.13: 4228743e4e51403692cf9578b35f3550a769804011126a9be18536ac591e8dd2
- v1.22.12: 9aa6e8df0dc0c77fd546762ccc78c3f2d349049855c59b0699a3192621590754
- v1.22.11: 8e0c2a168aac356b3c84e9366ae19c26fc5ecd1344e3ef92f56377ec4ccddc3b
- v1.22.10: daadf5f7c66fdcf2aa62a8504606a058621146379ea1bb52159ea0b087b986b2
- v1.22.9: 4b45c5fb69e385f58293c5142d0ee51f79c3e3620a180632bd2370c01d0698e7
- v1.22.8: 08ffeb8924c315cd466fc930377ac545edd6ac4ebb8bf284218947256b6729f3
- v1.22.7: be9a833a6eae7ee7698ee5cc18bacc2652207af07528e60a78f43a8139fffbfc
- v1.22.6: a0dea833198a95ec85b4d55fe7e16333bcdc6a93290238c7473887e7e06f23a7
- v1.22.5: 6db514e45f62f611d7e5f862c1eec6009e6de07852cf3cbc37309db1ed76920f
- v1.22.4: 9ea171e868aeb64b187a039edd79b2c7ea2bedbd752c76e1c5e44c2486d21f72
- v1.22.3: 28e2817751c94940469755911fe3d6a93e288391377f5bb8db08cffa538e72fa
- v1.22.2: a16f7d70e65589d2dbd5d4f2115f6ccd4f089fe17a2961c286b809ad94eb052a
- v1.22.1: 50991ec4313ee42da03d60e21b90bc15e3252c97db189d1b66aad5bbb555997b
- v1.22.0: 6d7c787416a148acffd49746837df4cebb1311c652483dc3d2c8d24ce1cc897e
arm64:
+ v1.25.3: cfd5092ce347a69fe49c93681a164d9a8376d69eef587da894207c62ec7d6a5d
+ v1.25.2: b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5
+ v1.25.1: 73602eabf20b877f88642fafcbe1eda439162c2c1dbcc9ed09fdd4d7ac9919ea
+ v1.25.0: 24db547bbae294c5c44f2b4a777e45f0e2f3d6295eace0d0c4be2b2dfa45330d
+ v1.24.7: 4b138a11b13210ce1731e06918f8fff6709c004c6fb6bec28544713854de9fe8
+ v1.24.6: 2f62e55960b02bb63cbc9154141520ac7cf0c2d55b45dd4a72867971e24a7219
+ v1.24.5: a5e348758c0f2b22adeb1b663b4b66781bded895d8ea2a714eb1de81fb00907a
v1.24.4: 0aa4a08ff81efe3fc1a8ef880ca2f8622e3b1f93bf622583d7b9bfe3124afe61
v1.24.3: bdad4d3063ddb7bfa5ecf17fb8b029d5d81d7d4ea1650e4369aafa13ed97149a
v1.24.2: 5a4c3652f08b4d095b686e1323ac246edbd8b6e5edd5a2626fb71afbcd89bc79
v1.24.1: b817b54183e089494f8b925096e9b65af3a356d87f94b73929bf5a6028a06271
v1.24.0: 449278789de283648e4076ade46816da249714f96e71567e035e9d17e1fff06d
+ v1.23.13: 950626ae35fca6c26096f97cac839d76e2f29616048ad30cec68f1ff003840f2
+ v1.23.12: 88ebbc41252b39d49ce574a5a2bb25943bb82e55a252c27fe4fc096ce2dbb437
+ v1.23.11: 9416cc7abaf03eb83f854a45a41986bf4e1232d129d7caafc3101a01ca11b0e3
v1.23.10: d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b
v1.23.9: 66659f614d06d0fe80c5eafdba7073940906de98ea5ee2a081d84fa37d8c5a21
v1.23.8: b293fce0b3dec37d3f5b8875b8fddc64e02f0f54f54dd7742368973c52530890
@@ -356,26 +376,22 @@ kubectl_checksums:
v1.23.2: 6e7bb8ddc5fc8fa89a4c31aba02942718b092a5107585bd09a83c95039c7510b
v1.23.1: c0c24c7f6a974390e15148a575c84878e925f32328ff96ae173ec762678e4524
v1.23.0: 1d77d6027fc8dfed772609ad9bd68f611b7e4ce73afa949f27084ad3a92b15fe
- v1.22.13: e3e845bac0e1c30de20438433a8d75c64c237892245887a2818bd877b9601b41
- v1.22.12: 7d6507ecb8061f7d94d1bd6b982c56b1a1f929427bcc27a962fe66c61100f12a
- v1.22.11: 35da77af0581740aa8815c461ee912181fbb4cec09c2e0c9f6dbee58a48758a6
- v1.22.10: 6ce1a1315225d7d62f7d17083c9f87d4f3f5684c80da108799c99780ad520cb3
- v1.22.9: 33724bed4dddf4d8ecd6ae75667552d121e2fb575ff2db427ce66516e048edac
- v1.22.8: 48105735b74e941a84dec6bd53637c023ad53dc5fadd9bf616347cb339c76b47
- v1.22.7: 44342131947bc61e6b03103e7e1302d16fa3e5b2e2cd67e27194f66223ecf798
- v1.22.6: b43199fe66a58f292f2c685b922330819190eb22ac41cc5c10c33fdf9f2bbc29
- v1.22.5: a122ef299d75c0bec1dc1e28670d358e13743144e68223c8178462ba5c436e1d
- v1.22.4: 3fcec0284c0fdfc22e89a5b73ebd7f51120cc3505a11a4f6d6f819d46a40b26a
- v1.22.3: ebeac516cc073cfe9550f114ca326f762d958cb91a33c8c9d03ede6ba94a6088
- v1.22.2: c5bcc7e5321d34ac42c4635ad4f6fe8bd4698e9c879dc3367be542a0b301297b
- v1.22.1: 5c7ef1e505c35a8dc0b708f6b6ecdad6723875bb85554e9f9c3fe591e030ae5c
- v1.22.0: 8d9cc92dcc942f5ea2b2fc93c4934875d9e0e8ddecbde24c7d4c4e092cfc7afc
amd64:
+ v1.25.3: f57e568495c377407485d3eadc27cda25310694ef4ffc480eeea81dea2b60624
+ v1.25.2: 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb
+ v1.25.1: 9cc2d6ce59740b6acf6d5d4a04d4a7d839b0a81373248ef0ce6c8d707143435b
+ v1.25.0: e23cc7092218c95c22d8ee36fb9499194a36ac5b5349ca476886b7edc0203885
+ v1.24.7: 2d88e56d668b1d7575b4783f22d512e94da432f42467c3aeac8a300b6345f12d
+ v1.24.6: 3ba7e61aecb19eadfa5de1c648af1bc66f5980526645d9dfe682d77fc313b74c
+ v1.24.5: 3037f2ec62956e7146fc86defb052d8d3b28e2daa199d7e3ff06d1e06a6286ed
v1.24.4: 4a76c70217581ba327f0ad0a0a597c1a02c62222bb80fbfea4f2f5cb63f3e2d8
v1.24.3: 8a45348bdaf81d46caf1706c8bf95b3f431150554f47d444ffde89e8cdd712c1
v1.24.2: f15fb430afd79f79ef7cf94a4e402cd212f02d8ec5a5e6a7ba9c3d5a2f954542
v1.24.1: 0ec3c2dbafc6dd27fc8ad25fa27fc527b5d7356d1830c0efbb8adcf975d9e84a
v1.24.0: 94d686bb6772f6fb59e3a32beff908ab406b79acdfb2427abdc4ac3ce1bb98d7
+ v1.23.13: fae6957e6a7047ad49cdd20976cd2ce9188b502c831fbf61f36618ea1188ba38
+ v1.23.12: b150c7c4830cc3be4bedd8998bf36a92975c95cd1967b4ef2d1edda080ffe5d9
+ v1.23.11: cf04ad2fa1cf118a951d690af0afbbe8f5fc4f02c721c848080d466e6159111e
v1.23.10: 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7
v1.23.9: 053561f7c68c5a037a69c52234e3cf1f91798854527692acd67091d594b616ce
v1.23.8: 299803a347e2e50def7740c477f0dedc69fc9e18b26b2f10e9ff84a411edb894
@@ -387,26 +403,22 @@ kubectl_checksums:
v1.23.2: 5b55b58205acbafa7f4e3fc69d9ce5a9257be63455db318e24db4ab5d651cbde
v1.23.1: 156fd5e7ebbedf3c482fd274089ad75a448b04cf42bc53f370e4e4ea628f705e
v1.23.0: 2d0f5ba6faa787878b642c151ccb2c3390ce4c1e6c8e2b59568b3869ba407c4f
- v1.22.13: b96d2bc9137ec63546a29513c40c5d4f74e9f89aa11edc15e3c2f674d5fa3e02
- v1.22.12: 8e36c8fa431e454e3368c6174ce3111b7f49c28feebdae6801ab3ca45f02d352
- v1.22.11: a61c697e3c9871da7b609511248e41d9c9fb6d9e50001425876676924761586b
- v1.22.10: 225bc8d4ac86e3a9e36b85d2d9cb90cd4b4afade29ba0292f47834ecf570abf2
- v1.22.9: ae6a9b585f9a366d24bb71f508bfb9e2bb90822136138109d3a91cd28e6563bb
- v1.22.8: 761bf1f648056eeef753f84c8365afe4305795c5f605cd9be6a715483fe7ca6b
- v1.22.7: 4dd14c5b61f112b73a5c9c844011a7887c4ffd6b91167ca76b67197dee54d388
- v1.22.6: 1ab07643807a45e2917072f7ba5f11140b40f19675981b199b810552d6af5c53
- v1.22.5: fcb54488199c5340ff1bc0e8641d0adacb27bb18d87d0899a45ddbcc45468611
- v1.22.4: 21f24aa723002353eba1cc2668d0be22651f9063f444fd01626dce2b6e1c568c
- v1.22.3: 0751808ca8d7daba56bf76b08848ef5df6b887e9d7e8a9030dd3711080e37b54
- v1.22.2: aeca0018958c1cae0bf2f36f566315e52f87bdab38b440df349cd091e9f13f36
- v1.22.1: 78178a8337fc6c76780f60541fca7199f0f1a2e9c41806bded280a4a5ef665c9
- v1.22.0: 703e70d49b82271535bc66bc7bd469a58c11d47f188889bd37101c9772f14fa1
ppc64le:
+ v1.25.3: bd59ac682fffa37806f768328fee3cb791772c4a12bcb155cc64b5c81b6c47ce
+ v1.25.2: 1e3665de15a591d52943e6417f3102b5d413bc1d86009801ad0def04e8c920c5
+ v1.25.1: 957170066abc4d4c178ac8d84263a191d351e98978b86b0916c1b8c061da8282
+ v1.25.0: dffe15c626d7921d77e85f390b15f13ebc3a9699785f6b210cd13fa6f4653513
+ v1.24.7: a68ec0c8ed579324037fc0a3bafa9d10184e6ff3ca34bfffdcb78f9f02bcb765
+ v1.24.6: 448009693a97428aec7e60cc117079724f890e3a46d0aa54accdb56f33ca0f3d
+ v1.24.5: 0861df1c77336fbe569887a884d62a24fcb6486d43798a8767dba7e5865c3c98
v1.24.4: cfd7151471dd9878d48ab8d7bc3cf945c207e130568ee778f1aed9ceb84afd44
v1.24.3: 893a83cd636650d1ad50be0e9a2517f2f4434c35646dacd9160b66446aee404e
v1.24.2: cacf9b4a539853158b885c39fa714710767aa6c12804fccb7de6b037228b811f
v1.24.1: 8812543e6c34101d37ad9d7a7edb91621db0fe992b16bd9beb8e5ddb4c7792c5
v1.24.0: 153a1ca1593ef4cb56b16922f8e229986a621d396112f0cfad6fa568ad00fa75
+ v1.23.13: 785d620dc77d10ce49218894225e935e55d08bb3842ae75c11cb41a814aca9ea
+ v1.23.12: f9a8efede8872c23c54c44f09657fa522e99786f3dc73ba7d6d928e9b3c7dc1a
+ v1.23.11: 52556d4e8ba19e8b0a65e4ac70203922b42b054647ec59a0177a2c4f61b903e7
v1.23.10: fc0867d7412d7698029413a8307d8e74748d47e402c075e8d6cc79ed772fb232
v1.23.9: 141532b62ce75860975d5913bfbf784a09b0abc83ca7d31a6b1eddf28866ce67
v1.23.8: 599ed10fc7e8fcb5884485cecf690c7645947d1f144b66d717a3f064f11c0b8f
@@ -418,27 +430,23 @@ kubectl_checksums:
v1.23.2: 97d50dc4ff0a6c70bbfcbd45f6959e6201c6317392b2894008017380669f6015
v1.23.1: 514e50afdb5b8953adfffe4941e903748348830bdd82805fd4489c3334a02a4a
v1.23.0: e96f2b16d8a10fe6531dfac9143efa4960432cf2ae8b26ffd174fa00eb28a851
- v1.22.13: fd4a8473a57275579eedd64a5d13aabf801cddef9f4a81f11658c40b19f559da
- v1.22.12: 3855d0a2add2a093772cb024b3cf678ddfa840b4a764f925b0c58ff94aaf13ee
- v1.22.11: e74b2c62c524b81e22a5e66bf2abe2f036d26bb541663a4383abd6655d365288
- v1.22.10: 98226e40cd93c7a23bf3dde675879207d393d886e53d0e3dfdf8a2732307711c
- v1.22.9: 4ac554b2eb811c10276761ec185e1dbd96b24df4ed141159960c2325d6451f6e
- v1.22.8: 30d5cba5bdee3bb9395a988867a161ff52e7dc01a40cd4fa2a2adb1c08b76227
- v1.22.7: b25bcc11619ea61a60a1cfa8bfd4ef15ccb8db008251013b3473cc04082754bc
- v1.22.6: d9acb45bcbcead81e8f61572dd800d82e605af2532edb4be1633b732b009d2e2
- v1.22.5: a613f330b10b24992780149184ea3257210932ea9f569af323f84e9debfea535
- v1.22.4: a89d158be97df1f7b4d56ed28b219c8f09427511283b78917352b9e90b9f37bf
- v1.22.3: b55409b40c60fddf24e6e93cbcee2e33c3c5d8f4a6b3f9c8cf4eb1f23119388d
- v1.22.2: f8c8c4734846c56a8eae6e5c877c84e38513337ea1ca08d63e019ffe82114342
- v1.22.1: 4eced82fb83c405937c35c18de5ac25befa68ca5ab016b3d279011d7f3701eea
- v1.22.0: 7ea30171a5db9dfbdc240674f5cde00fb75a8193ef73783950b8d10c810b6a5b
kubeadm_checksums:
arm:
+ v1.25.3: 3f357e1e57936ec7812d35681be249b079bbdc1c7f13a75e6159379398e37d5e
+ v1.25.2: 2f794569c3322bb66309c7f67126b7f88155dfb1f70eea789bec0edf4e10015e
+ v1.25.1: ecb7a459ca23dfe527f4eedf33fdb0df3d55519481a8be3f04a5c3a4d41fa588
+ v1.25.0: 67b6b58cb6abd5a4c9024aeaca103f999077ce6ec8e2ca13ced737f5139ad2f0
+ v1.24.7: c0a9e6c08cad0b727f06bb3b539d55c65ea977be68fe471f6a9f73af3fbcb275
+ v1.24.6: 760f0fc195f00ca3d1612e0974461ab937c25aa1e7a2f8d2357cd1336b2ecf3a
+ v1.24.5: 973f1ad7da9216fe3e0319a0c4fcb519a21a773cd39a0a445e689bea3d4a27c7
v1.24.4: e0c1510ab2ed1cd555abad6f226454a3206aaaf20474da7dcf976ddc86a065d4
v1.24.3: dc90c93e2305a7babafc41185a43435a9f3af2ef5d546bbd06e6553898e43d9e
v1.24.2: d4bead61c1ba03113281ab96b21530b32e96eea24220bd2aebe1abdec739c266
v1.24.1: 1c0b22c941badb40f4fb93e619b4a1c5e4bba7c1c7313f7c7e87d77150f35153
v1.24.0: c463bf24981dea705f4ee6e547abd5cc3b3e499843f836aae1a04f5b80abf4c2
+ v1.23.13: 54d0f4d7a65abf610606b0538005ab5f177566587a81af6b0bc24ded2f8e305c
+ v1.23.12: 6da38118a7a1570ad76389f0492c11f8ae8e2068395773b89a2b0442d02e604c
+ v1.23.11: 4ea0f63d245d01eccc5c3f2c849e2c799392d5e37c9bc4c0ec7a06a5d3722622
v1.23.10: e0db03e8c4c06c3c3e5e29558fa316b0b56ac9d2801751c4a36b2e3f84455b1f
v1.23.9: fa265d592d4f85b083919baa80b232deae20acaf2a20095a9c417c4d5324e002
v1.23.8: 24d159ac19b519453050a977d2f238873c328e3a9dd3dfe524a32f421b64dadb
@@ -450,26 +458,22 @@ kubeadm_checksums:
v1.23.2: 63a6ca7dca76475ddef84e4ff84ef058ee2003d0e453b85a52729094025d158e
v1.23.1: 77baac1659f7f474ba066ef8ca67a86accc4e40d117e73c6c76a2e62689d8369
v1.23.0: b59790cdce297ac0937cc9ce0599979c40bc03601642b467707014686998dbda
- v1.22.13: dc8cb74f5f427958eda265c8190c2f12877e71eb4f04269dd85dfa86a8044208
- v1.22.12: d2d1f19c74186e9247cea9ff9ba484a658bd4985060979babe5c28389e594d0a
- v1.22.11: b2a5a1c827fe18f4589628cdb69e73c1e65011381ec015e1daa7a31198199302
- v1.22.10: f1ab42fbadb0a66ba200392ee82c05b65e3d29a3d8f3e030b774cbc48915dedb
- v1.22.9: f68ca35fc71691e599d4913de58b6d77abcb2d27c324abc23388b4383b5299ea
- v1.22.8: f55fce83ae69b0f660a0fbdd2d05681d2e29a1119d7cce890fe1f50724bdcc60
- v1.22.7: 26b3d79d88e81bf354d716fa48210b0358d2f6ca99cba06eb7640ac1e32724b8
- v1.22.6: ad23ad06e83f2466f78652221f73fd58d23d6122b3395c24d9a3be779f6afa49
- v1.22.5: f0c95c9b86287ec8570388f8fc26ad05ac342f69876a08cb6cb5aa2ffcc1febd
- v1.22.4: 5e52ee3c3f0f5bffd9f0d9e7b3e215b5ab239feb425d47d8bd609bd4b1fb1d61
- v1.22.3: d3c76311c582e48889bdb3e3ef1948ce0292983a0c13d37c7e8ae5c6024291f5
- v1.22.2: 6ccc26494160e19468b0cb55d56b2d5c62d21424fac79cb66402224c2bf73a0d
- v1.22.1: cc08281c5261e860df9a0b5040b8aa2e6d202a243daf25556f5f6d3fd8f2e1e9
- v1.22.0: 6a002deb0ee191001d5c0e0435e9a995d70aa376d55075c5f61e70ce198433b8
arm64:
+ v1.25.3: 61bb61eceff78b44be62a12bce7c62fb232ce1338928e4207deeb144f82f1d06
+ v1.25.2: 437dc97b0ca25b3fa8d74b39e4059a77397b55c1a6d16bddfd5a889d91490ce0
+ v1.25.1: f4d57d89c53b7fb3fe347c9272ed40ec55eab120f4f09cd6b684e97cb9cbf1f0
+ v1.25.0: 07d9c6ffd3676502acd323c0ca92f44328a1f0e89a7d42a664099fd3016cf16b
+ v1.24.7: ee946d82173b63f69be9075e218250d4ab1deec39d17d600b16b6743e5dca289
+ v1.24.6: 211b8d1881468bb673b26036dbcfa4b12877587b0a6260ffd55fd87c2aee6e41
+ v1.24.5: a68c6dd24ef47825bb34a2ad430d76e6b4d3cbe92187363676993d0538013ac2
v1.24.4: 18de228f6087a2e5243bffcd2cc88c40180a4fa83e4de310ad071b4620bdd8b6
v1.24.3: ea0fb451b69d78e39548698b32fb8623fad61a1a95483fe0add63e3ffb6e31b5
v1.24.2: bd823b934d1445a020f8df5fe544722175024af62adbf6eb27dc7250d5db0548
v1.24.1: 04f18fe097351cd16dc91cd3bde979201916686c6f4e1b87bae69ab4479fda04
v1.24.0: 3e0fa21b8ebce04ca919fdfea7cc756e5f645166b95d6e4b5d9912d7721f9004
+ v1.23.13: 462971d5822c91598754dfaa9c4c8d46a8c74aefef0f4dbbc8be31c4f0d18855
+ v1.23.12: d05f6765a65f7541d07aad989ee80cd730c395f042afbe0526f667ea1a0b2947
+ v1.23.11: 329d9aa9461baf4a7b7225e664ec1ecd61512b937e1f160f9a303bc0f0d44bbb
v1.23.10: 42e957eebef78f6462644d9debc096616054ebd2832e95a176c07c28ebed645c
v1.23.9: a0a007023db78e5f78d3d4cf3268b83f093201847c1c107ffb3dc695f988c113
v1.23.8: 9b3d8863ea4ab0438881ccfbe285568529462bc77ef4512b515397a002d81b22
@@ -481,26 +485,22 @@ kubeadm_checksums:
v1.23.2: a29fcde7f92e1abfe992e99f415d3aee0fa381478b4a3987e333438b5380ddff
v1.23.1: eb865da197f4595dec21e6fb1fa1751ef25ac66b64fa77fd4411bbee33352a40
v1.23.0: 989d117128dcaa923b2c7a917a03f4836c1b023fe1ee723541e0e39b068b93a6
- v1.22.13: 2c42aadc99b46b6b5684acc7dfa630c67cb12c19b17df4cea3d2091ef5753011
- v1.22.12: d0469a3008411edb50f6562e00f1df28123cf2dc368f1538f1b41e27b0482b1c
- v1.22.11: 15e1cba65f0db4713bf45ee23dbd01dd30048d20ad97ef985d6b9197f8ae359a
- v1.22.10: 8ea22a05b428de70a430711e8f75553e1be2925977ab773b5be1c240bc5b9fcd
- v1.22.9: 0168c60d1997435b006b17c95a1d42e55743048cc50ee16c8774498aa203a202
- v1.22.8: 67f09853d10434347eb75dbb9c63d57011ba3e4f7e1b320a0c30612b8185be8c
- v1.22.7: 2ae0287769a70f442757e49af0ecd9ca2c6e5748e8ba72cb822d669a7aeeb8fa
- v1.22.6: bc10e4fb42a182515f4232205bea53f90270b8f80ec1a6c1cc3301bff05e86b7
- v1.22.5: 47aa54533289277ac13419c16ffd1a2c35c7af2d6a571261e3d728990bc5fc7d
- v1.22.4: 3dfb128e108a3f07c53cae777026f529784a057628c721062d8fdd94b6870b69
- v1.22.3: dcd1ecfb7f51fb3929b9c63a984b00cf6baa6136e1d58f943ee2c9a47af5875d
- v1.22.2: 77b4c6a56ae0ec142f54a6f5044a7167cdd7193612b04b77bf433ffe1d1918ef
- v1.22.1: 85df7978b2e5bb78064ed0bcce14a39d105a1a3968bb92ee5d2f96a1fa09ed12
- v1.22.0: 9fc14b993de2c275b54445255d7770bd1d6cdb49f4cf9c227c5b035f658a2351
amd64:
+ v1.25.3: 01b59ce429263c62b85d2db18f0ccdef076b866962ed63971ff2bd2864deea7b
+ v1.25.2: 63ee3de0c386c6f3c155874b46b07707cc72ce5b9e23f336befd0b829c1bd2ad
+ v1.25.1: adaa1e65c1cf9267a01e889d4c13884f883cf27948f00abb823f10486f1a8420
+ v1.25.0: 10b30b87af2cdc865983d742891eba467d038f94f3926bf5d0174f1abf6628f8
+ v1.24.7: 8b67319d28bf37e8e7c224954dc778cbe946f2bb0ed86975d8caa83d51c955ee
+ v1.24.6: 7f4443fd42e0e03f6fd0c7218ca7e2634c9255d5f9d7c581fe362e19098aec4c
+ v1.24.5: 3b9c1844ec0fc3c94015d63470b073a7b219082b6a6424c6b0da9cf97e234aeb
v1.24.4: 9ec08e0905c0a29a68676ba9f6dd7de73bef13cfa2b846a45e1c2189572dc57c
v1.24.3: 406d5a80712c45d21cdbcc51aab298f0a43170df9477259443d48eac116998ff
v1.24.2: 028f73b8e7c2ae389817d34e0cb829a814ce2fac0a535a3aa0708f3133e3e712
v1.24.1: 15e3193eecbc69330ada3f340c5a47999959bc227c735fa95e4aa79470c085d0
v1.24.0: 5e58a29eaaf69ea80e90d9780d2a2d5f189fd74f94ec3bec9e3823d472277318
+ v1.23.13: ff86af2b5fa979234dd3f9e7b04ec7d3017239a58417397153726d8077c4ac89
+ v1.23.12: bf45d00062688d21ff479bf126e1259d0ce3dee1c5c2fcd803f57497cd5e9e83
+ v1.23.11: 2f10bd298a694d3133ea19192b796a106c282441e4148c114c39376042097692
v1.23.10: 43d186c3c58e3f8858c6a22bc71b5441282ac0ccbff6f1d0c2a66ee045986b64
v1.23.9: 947571c50ab840796fdd4ffb129154c005dfcb0fe83c6eff392d46cf187fd296
v1.23.8: edbd60fd6a7e11c71f848b3a6e5d1b5a2bb8ebd703e5490caa8db267361a7b89
@@ -512,26 +512,22 @@ kubeadm_checksums:
v1.23.2: 58487391ec37489bb32fe532e367995e9ecaeafdb65c2113ff3675e7a8407219
v1.23.1: 4d5766cb90050ee84e15df5e09148072da2829492fdb324521c4fa6d74d3aa34
v1.23.0: e21269a058d4ad421cf5818d4c7825991b8ba51cd06286932a33b21293b071b0
- v1.22.13: acbb0dd67b7656d0c70049484ba31c1981b803be0ae8f430dacad67e3e06c121
- v1.22.12: 9410dcff069993caa7dfe783d35ac2d929ec258a2c3a4f0c3f269f1091931263
- v1.22.11: da3594b4e905627fd5c158531280e40a71dadf44f1f0b6c061a1b729a898dd9b
- v1.22.10: df5e090a3c0e24b92b26f22f1d7689b6ea860099ea89b97edf5d4c19fa6da0ca
- v1.22.9: e3061f3a9c52bff82ae740c928fe389a256964a5756d691758bf3611904d7183
- v1.22.8: fc10b4e5b66c9bfa6dc297bbb4a93f58051a6069c969905ef23c19680d8d49dc
- v1.22.7: 7e4be37fc5ddeeae732886bf83c374198813e76d84ed2f6590145e08ece1a8b2
- v1.22.6: 0bf8e47ad91215cd8c5e0ded565645aeb1ad6f0a9223a2486eb913bff929d472
- v1.22.5: a512be0fa429f43d3457472efd73529cd2ba2cd54ef714faf6b69486beea054f
- v1.22.4: 33b799df2941f12a53ffe995d86a385c35d3c543f9d2c00c0cdb47ec91a98c5c
- v1.22.3: 3964e6fd46052eb4a9672421d8e8ce133b83b45abb77481b688dc6375390e480
- v1.22.2: 4ff09d3cd2118ee2670bc96ed034620a9a1ea6a69ef38804363d4710a2f90d8c
- v1.22.1: 50a5f0d186d7aefae309539e9cc7d530ef1a9b45ce690801655c2bee722d978c
- v1.22.0: 90a48b92a57ff6aef63ff409e2feda0713ca926b2cd243fe7e88a84c483456cc
ppc64le:
+ v1.25.3: 8fe9a69db91c779a8f29b216134508ba49f999fa1e36b295b99444f31266da17
+ v1.25.2: a53101ed297299bcf1c4f44ec67ff1cb489ab2d75526d8be10c3068f161601a7
+ v1.25.1: c7e2c8d2b852e1b30894b64875191ce388a3a416d41311b21f2d8594872fe944
+ v1.25.0: 31bc72e892f3a6eb5db78003d6b6200ba56da46a746455991cb422877afc153d
+ v1.24.7: 29a53be9a74dcb01ea68b0a385bdd9b510f9792955f9f7c93ed608c851b5dc32
+ v1.24.6: 9d73bfde24ee9781fcca712658f297a041408b534f875f5e093222ed64c91c15
+ v1.24.5: f416c45ca5826ea3ff13be393911424a0fba3aa30b5557d3d32541551566142a
v1.24.4: 00fe93a291ddca28188056e597fc812b798706ea19b2da6f8aaf688f6ea95c0e
v1.24.3: 1cb40441d8982362c6d4ffdd9a980a4563dcc5cccc1bb1d7370f0bd7340484d2
v1.24.2: 452922d2ec9bfa5e085a879174d1d99adb6212598f3c8ffe15b5e7c3a4e128bb
v1.24.1: 74e84b4e6f2c328a169dab33956bc076a2c1670c638764b9163b1080dcb68137
v1.24.0: 286de74330365bf660d480297a7aba165a956f6fbb98acd11df2f672e21d7b5c
+ v1.23.13: 3dbf72fdfc108bf41cab151ac340b336ba17b14fa008b15d84ce223b30391914
+ v1.23.12: ccae0a4c81a60e50219954393432c5f4d4692847c866ca497a48a1118f417d0d
+ v1.23.11: 9930cfb4ae7663f145c1d08e06c49ab60e28a6613ac5c7b19d047f15c1e24c22
v1.23.10: c9f484bd8806f50ce051a28776ef92e3634a1cdc0a47c9483ee77c34cde845c1
v1.23.9: 03643613aa6afc6251270adc7681029d4fc10e8a75d553a1d8e63cf5b5a2a8fe
v1.23.8: dcfb69f564b34942136cc4cc340b1c800e3e610292e517e68ab5e0157b9510af
@@ -543,20 +539,6 @@ kubeadm_checksums:
v1.23.2: 2d76c4d9795e25867b9b6fe7853f94efb8c2f2b3052adab4073fddca93eedc01
v1.23.1: 6b645c868834197bcb25104f468c601477967341aba6326bdf5d0957dcaa9edc
v1.23.0: 895c84055bca698f50ecdf1fc01d2f368563f77384b1dd00bdacbf6d0c825cc1
- v1.22.13: 066051f2efb29656a04dbb6a378b813779fedacbf3be7034286b07ad43e364c7
- v1.22.12: 70c14af98ecaa5d4ac234c827a560df9a020b346af250b6fb8ac9e50943486d3
- v1.22.11: b2a8d92de208b66e3c2bd03521e26cf84a3977c74242e4f0e6724bdebd861326
- v1.22.10: f74feaf8ea42145a668111733e8ed55a05d062ca40b0281851c2c48d28b74468
- v1.22.9: aca9539afc208343b0138d2e9e56b018ea782b74068389e7381e1c361f584446
- v1.22.8: 715dcac3dc5055306fc9b56352f5323df7947479c831993fecadc3a7c9072071
- v1.22.7: 1496cb57091c6189728f295fbc6f8ea944f08fa9f844d917f7f7ca1a3b896acb
- v1.22.6: a3aed2613b0566d1c829c15ff1206c25743bade24c4087b039824860d07de517
- v1.22.5: d877c380f3fe4ee3c68f02ffa185252129aaba390129fd6a3542f6d9c5e88a6f
- v1.22.4: 3e4cc45da8067f0da56e848b39531874c0a144840f4794731a4fb3b4689a8de2
- v1.22.3: f993698da6f64a222edb92e352331c46516dbef9e235b12471c9d697aac74524
- v1.22.2: 115bdf1e9e4821cf02aa77875930b4640cfba6b3560492ac75fe6159e897be6f
- v1.22.1: 45e5145abf4700ddb5de3469ddb6b316e7588595e4a3e64f44064738808b9c97
- v1.22.0: 22a7d995e78e93abca2999c911b065d63f51f33982dc305f23762a8d7c045d25
etcd_binary_checksums:
# Etcd does not have arm32 builds at the moment, having some dummy value is required
@@ -564,15 +546,19 @@ etcd_binary_checksums:
arm:
v3.5.3: 0
v3.5.4: 0
+ v3.5.5: 0
arm64:
v3.5.3: 8b00f2f51568303799368ee4a3c9b9ff8a3dd9f8b7772c4f6589e46bc62f7115
v3.5.4: 8e9c2c28ed6b35f36fd94300541da10e1385f335d677afd8efccdcba026f1fa7
+ v3.5.5: a8d177ae8ecfd1ef025c35ac8c444041d14e67028c1a7b4eda3a69a8dee5f9c3
amd64:
v3.5.3: e13e119ff9b28234561738cd261c2a031eb1c8688079dcf96d8035b3ad19ca58
v3.5.4: b1091166153df1ee0bb29b47fb1943ef0ddf0cd5d07a8fe69827580a08134def
+ v3.5.5: 7910a2fdb1863c80b885d06f6729043bff0540f2006bf6af34674df2636cb906
ppc64le:
v3.5.3: f14154897ca5ad4698383b4c197001340fbe467525f6fab3b89ee8116246480f
v3.5.4: 2f0389caed87c2504ffc5a07592ca2a688dee45d599073e5f977d9ce75b5f941
+ v3.5.5: 08422dffd5749f0a5f18bd820241d751e539a666af94251c3715cba8f4702c42
cni_binary_checksums:
arm:
@@ -645,13 +631,13 @@ krew_archive_checksums:
helm_archive_checksums:
arm:
- v3.9.2: fb9f0c1c9475c66c2b3579b908c181d519761bbfae963ffac860bc683a2253de
+ v3.9.4: 18ce0f79dcd927fea5b714ca03299929dad05266192d4cde3de6b4c4d4544249
arm64:
- v3.9.2: e4e2f9aad786042d903534e3131bc5300d245c24bbadf64fc46cca1728051dbc
+ v3.9.4: d24163e466f7884c55079d1050968e80a05b633830047116cdfd8ae28d35b0c0
amd64:
- v3.9.2: 3f5be38068a1829670440ccf00b3b6656fd90d0d9cfd4367539f3b13e4c20531
+ v3.9.4: 31960ff2f76a7379d9bac526ddf889fb79241191f1dbe2a24f7864ddcb3f6560
ppc64le:
- v3.9.2: 85ae9bc357095917cdb2d801b7eb62926f3fed6c2dcf07e1280809ad2af3daa9
+ v3.9.4: c63a951415c192397fda07c2f52aa60639b280920381c48d58be6803eb0c22f9
cri_dockerd_archive_checksums:
arm:
@@ -855,6 +841,16 @@ containerd_archive_checksums:
1.6.7: 0db5cb6d5dd4f3b7369c6945d2ec29a9c10b106643948e3224e53885f56863a9
1.6.8: f18769721f614828f6b778030c72dc6969ce2108f2363ddc85f6c7a147df0fb8
+skopeo_binary_checksums:
+ arm:
+ v1.10.0: 0
+ arm64:
+ v1.10.0: 3bfc344d4940df29358f8056de7b8dd488b88a5d777b3106748ba66851fa2c58
+ amd64:
+ v1.10.0: 20fbd1bac1d33768c3671e4fe9d90c5233d7e13a40e4935b4b24ebc083390604
+ ppc64le:
+ v1.10.0: 0
+
etcd_binary_checksum: "{{ etcd_binary_checksums[image_arch][etcd_version] }}"
cni_binary_checksum: "{{ cni_binary_checksums[image_arch][cni_version] }}"
kubelet_binary_checksum: "{{ kubelet_checksums[image_arch][kube_version] }}"
@@ -863,6 +859,7 @@ kubeadm_binary_checksum: "{{ kubeadm_checksums[image_arch][kubeadm_version] }}"
calicoctl_binary_checksum: "{{ calicoctl_binary_checksums[image_arch][calico_ctl_version] }}"
calico_crds_archive_checksum: "{{ calico_crds_archive_checksums[calico_version] }}"
crictl_binary_checksum: "{{ crictl_checksums[image_arch][crictl_version] }}"
+crio_archive_checksum: "{{ crio_archive_checksums[image_arch][crio_version] }}"
cri_dockerd_archive_checksum: "{{ cri_dockerd_archive_checksums[image_arch][cri_dockerd_version] }}"
helm_archive_checksum: "{{ helm_archive_checksums[image_arch][helm_version] }}"
runc_binary_checksum: "{{ runc_checksums[image_arch][runc_version] }}"
@@ -874,6 +871,7 @@ gvisor_containerd_shim_binary_checksum: "{{ gvisor_containerd_shim_binary_checks
nerdctl_archive_checksum: "{{ nerdctl_archive_checksums[image_arch][nerdctl_version] }}"
krew_archive_checksum: "{{ krew_archive_checksums[host_os][image_arch][krew_version] }}"
containerd_archive_checksum: "{{ containerd_archive_checksums[image_arch][containerd_version] }}"
+skopeo_binary_checksum: "{{ skopeo_binary_checksums[image_arch][skopeo_version] }}"
# Containers
# In some cases, we need a way to set --registry-mirror or --insecure-registry for docker,
@@ -949,7 +947,7 @@ haproxy_image_tag: 2.6.1-alpine
# Coredns version should be supported by corefile-migration (or at least work with)
# bundle with kubeadm; if not 'basic' upgrade can sometimes fail
-coredns_version: "{{ 'v1.8.6' if (kube_version is version('v1.23.0','>=')) else 'v1.8.0' }}"
+coredns_version: "v1.8.6"
coredns_image_is_namespaced: "{{ (coredns_version is version('v1.7.1','>=')) }}"
coredns_image_repo: "{{ kube_image_repo }}{{'/coredns/coredns' if (coredns_image_is_namespaced | bool) else '/coredns' }}"
@@ -982,12 +980,12 @@ local_path_provisioner_version: "v0.0.22"
local_path_provisioner_image_repo: "{{ docker_image_repo }}/rancher/local-path-provisioner"
local_path_provisioner_image_tag: "{{ local_path_provisioner_version }}"
ingress_nginx_controller_image_repo: "{{ kube_image_repo }}/ingress-nginx/controller"
-ingress_nginx_controller_image_tag: "v1.3.0"
+ingress_nginx_controller_image_tag: "v1.4.0"
ingress_nginx_kube_webhook_certgen_imae_repo: "{{ kube_image_repo }}/ingress-nginx/kube-webhook-certgen"
-ingress_nginx_kube_webhook_certgen_imae_tag: "v1.1.1"
+ingress_nginx_kube_webhook_certgen_imae_tag: "v1.3.0"
alb_ingress_image_repo: "{{ docker_image_repo }}/amazon/aws-alb-ingress-controller"
alb_ingress_image_tag: "v1.1.9"
-cert_manager_version: "v1.9.0"
+cert_manager_version: "v1.9.1"
cert_manager_controller_image_repo: "{{ quay_image_repo }}/jetstack/cert-manager-controller"
cert_manager_controller_image_tag: "{{ cert_manager_version }}"
cert_manager_cainjector_image_repo: "{{ quay_image_repo }}/jetstack/cert-manager-cainjector"
@@ -1009,9 +1007,9 @@ csi_livenessprobe_image_repo: "{{ kube_image_repo }}/sig-storage/livenessprobe"
csi_livenessprobe_image_tag: "v2.5.0"
snapshot_controller_supported_versions:
+ v1.25: "v4.2.1"
v1.24: "v4.2.1"
v1.23: "v4.2.1"
- v1.22: "v4.2.1"
snapshot_controller_image_repo: "{{ kube_image_repo }}/sig-storage/snapshot-controller"
snapshot_controller_image_tag: "{{ snapshot_controller_supported_versions[kube_major_version] }}"
@@ -1156,6 +1154,19 @@ downloads:
groups:
- k8s_cluster
+ crio:
+ file: true
+ enabled: "{{ container_manager == 'crio' }}"
+ version: "{{ crio_version }}"
+ dest: "{{ local_release_dir }}/cri-o.{{ image_arch }}.{{ crio_version }}tar.gz"
+ sha256: "{{ crio_archive_checksum }}"
+ url: "{{ crio_download_url }}"
+ unarchive: true
+ owner: "root"
+ mode: "0755"
+ groups:
+ - k8s_cluster
+
cri_dockerd:
file: true
enabled: "{{ container_manager == 'docker' }}"
@@ -1275,6 +1286,19 @@ downloads:
groups:
- k8s_cluster
+ skopeo:
+ file: true
+ enabled: "{{ container_manager == 'crio' }}"
+ version: "{{ skopeo_version }}"
+ dest: "{{ local_release_dir }}/skopeo"
+ sha256: "{{ skopeo_binary_checksum }}"
+ url: "{{ skopeo_download_url }}"
+ unarchive: false
+ owner: "root"
+ mode: "0755"
+ groups:
+ - kube_control_plane
+
cilium:
enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}"
container: true
@@ -1302,6 +1326,51 @@ downloads:
groups:
- k8s_cluster
+ cilium_hubble_relay:
+ enabled: "{{ cilium_enable_hubble }}"
+ container: true
+ repo: "{{ cilium_hubble_relay_image_repo }}"
+ tag: "{{ cilium_hubble_relay_image_tag }}"
+ sha256: "{{ cilium_hubble_relay_digest_checksum|default(None) }}"
+ groups:
+ - k8s_cluster
+
+ cilium_hubble_certgen:
+ enabled: "{{ cilium_enable_hubble }}"
+ container: true
+ repo: "{{ cilium_hubble_certgen_image_repo }}"
+ tag: "{{ cilium_hubble_certgen_image_tag }}"
+ sha256: "{{ cilium_hubble_certgen_digest_checksum|default(None) }}"
+ groups:
+ - k8s_cluster
+
+ cilium_hubble_ui:
+ enabled: "{{ cilium_enable_hubble }}"
+ container: true
+ repo: "{{ cilium_hubble_ui_image_repo }}"
+ tag: "{{ cilium_hubble_ui_image_tag }}"
+ sha256: "{{ cilium_hubble_ui_digest_checksum|default(None) }}"
+ groups:
+ - k8s_cluster
+
+ cilium_hubble_ui_backend:
+ enabled: "{{ cilium_enable_hubble }}"
+ container: true
+ repo: "{{ cilium_hubble_ui_backend_image_repo }}"
+ tag: "{{ cilium_hubble_ui_backend_image_tag }}"
+ sha256: "{{ cilium_hubble_ui_backend_digest_checksum|default(None) }}"
+ groups:
+ - k8s_cluster
+
+ cilium_hubble_envoy:
+ enabled: "{{ cilium_enable_hubble }}"
+ container: true
+ repo: "{{ cilium_hubble_envoy_image_repo }}"
+ tag: "{{ cilium_hubble_envoy_image_tag }}"
+ sha256: "{{ cilium_hubble_envoy_digest_checksum|default(None) }}"
+ groups:
+ - k8s_cluster
+
multus:
enabled: "{{ kube_network_plugin_multus }}"
container: true
@@ -1495,7 +1564,7 @@ downloads:
tag: "{{ coredns_image_tag }}"
sha256: "{{ coredns_digest_checksum|default(None) }}"
groups:
- - kube_control_plane
+ - k8s_cluster
nodelocaldns:
enabled: "{{ enable_nodelocaldns }}"
@@ -1731,7 +1800,7 @@ downloads:
- kube_control_plane
metallb_speaker:
- enabled: "{{ metallb_enabled }}"
+ enabled: "{{ metallb_speaker_enabled }}"
container: true
repo: "{{ metallb_speaker_image_repo }}"
tag: "{{ metallb_version }}"
diff --git a/roles/download/tasks/set_container_facts.yml b/roles/download/tasks/set_container_facts.yml
index 5890e6c82..9d36c2484 100644
--- a/roles/download/tasks/set_container_facts.yml
+++ b/roles/download/tasks/set_container_facts.yml
@@ -35,8 +35,8 @@
- name: Set image save/load command for crio
set_fact:
- image_save_command: "skopeo copy containers-storage:{{ image_reponame }} docker-archive:{{ image_path_final }}"
- image_load_command: "skopeo copy docker-archive:{{ image_path_final }} containers-storage:{{ image_reponame }}"
+ image_save_command: "{{ bin_dir }}/skopeo copy containers-storage:{{ image_reponame }} docker-archive:{{ image_path_final }} 2>/dev/null"
+ image_load_command: "{{ bin_dir }}/skopeo copy docker-archive:{{ image_path_final }} containers-storage:{{ image_reponame }} 2>/dev/null"
when: container_manager == 'crio'
- name: Set image save/load command for docker on localhost
@@ -51,5 +51,5 @@
- name: Set image save/load command for crio on localhost
set_fact:
- image_save_command_on_localhost: "skopeo copy containers-storage:{{ image_reponame }} docker-archive:{{ image_path_final }}"
+ image_save_command_on_localhost: "{{ bin_dir }}/skopeo copy containers-storage:{{ image_reponame }} docker-archive:{{ image_path_final }} 2>/dev/null"
when: container_manager_on_localhost == 'crio'
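With kubespray's default `bin_dir` (`/usr/local/bin`) and `local_release_dir` (`/tmp/releases`), these facts render to something like the following; the image reference and archive path are illustrative placeholders, not values from this diff:

```yaml
image_save_command: "/usr/local/bin/skopeo copy containers-storage:registry.k8s.io/pause:3.8 docker-archive:/tmp/releases/images/pause.tar 2>/dev/null"
image_load_command: "/usr/local/bin/skopeo copy docker-archive:/tmp/releases/images/pause.tar containers-storage:registry.k8s.io/pause:3.8 2>/dev/null"
```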
diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml
index 1f11e8ddc..bf38acee5 100644
--- a/roles/etcd/defaults/main.yml
+++ b/roles/etcd/defaults/main.yml
@@ -66,7 +66,7 @@ etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif %
etcd_blkio_weight: 1000
-etcd_node_cert_hosts: "{{ groups['k8s_cluster'] | union(groups.get('calico_rr', [])) }}"
+etcd_node_cert_hosts: "{{ groups['k8s_cluster'] }}"
etcd_compaction_retention: "8"
diff --git a/roles/etcd/tasks/check_certs.yml b/roles/etcd/tasks/check_certs.yml
index ed0580b55..c688c16d8 100644
--- a/roles/etcd/tasks/check_certs.yml
+++ b/roles/etcd/tasks/check_certs.yml
@@ -33,14 +33,13 @@
stat:
path: "{{ etcd_cert_dir }}/{{ item }}"
register: etcd_node_certs
- when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
- inventory_hostname in groups['k8s_cluster'])
+ when: inventory_hostname in groups['k8s_cluster']
with_items:
- ca.pem
- node-{{ inventory_hostname }}.pem
- node-{{ inventory_hostname }}-key.pem
-- name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node"
+- name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(1/2)"
set_fact:
gen_certs: true
when: force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list
@@ -56,13 +55,39 @@
'{{ etcd_cert_dir }}/member-{{ host }}.pem',
'{{ etcd_cert_dir }}/member-{{ host }}-key.pem',
{% endfor %}
- {% set k8s_nodes = groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort %}
+ {% set k8s_nodes = groups['kube_control_plane'] %}
{% for host in k8s_nodes %}
'{{ etcd_cert_dir }}/node-{{ host }}.pem',
'{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
{% if not loop.last %}{{','}}{% endif %}
{% endfor %}]
+- name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(2/2)"
+ set_fact:
+ gen_certs: true
+ run_once: true
+ with_items: "{{ expected_files }}"
+ vars:
+ expected_files: >-
+ ['{{ etcd_cert_dir }}/ca.pem',
+ {% set etcd_members = groups['etcd'] %}
+ {% for host in etcd_members %}
+ '{{ etcd_cert_dir }}/admin-{{ host }}.pem',
+ '{{ etcd_cert_dir }}/admin-{{ host }}-key.pem',
+ '{{ etcd_cert_dir }}/member-{{ host }}.pem',
+ '{{ etcd_cert_dir }}/member-{{ host }}-key.pem',
+ {% endfor %}
+ {% set k8s_nodes = groups['k8s_cluster']|unique|sort %}
+ {% for host in k8s_nodes %}
+ '{{ etcd_cert_dir }}/node-{{ host }}.pem',
+ '{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
+ {% if not loop.last %}{{','}}{% endif %}
+ {% endfor %}]
+ when:
+ - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+ - kube_network_plugin != "calico" or calico_datastore == "etcd"
+ - force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list
+
- name: "Check_certs | Set 'gen_master_certs' object to track whether member and admin certs exist on first etcd node"
set_fact:
gen_master_certs: |-
@@ -89,7 +114,7 @@
set_fact:
gen_node_certs: |-
{
- {% set k8s_nodes = groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort -%}
+ {% set k8s_nodes = groups['k8s_cluster'] -%}
{% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %}
{% for host in k8s_nodes -%}
{% set host_cert = "%s/node-%s.pem"|format(etcd_cert_dir, host) %}
@@ -125,8 +150,7 @@
set_fact:
kubernetes_host_requires_sync: true
when:
- - (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
- inventory_hostname in groups['k8s_cluster']) and
+ - inventory_hostname in groups['k8s_cluster'] and
inventory_hostname not in groups['etcd']
- (not etcd_node_certs.results[0].stat.exists|default(false)) or
(not etcd_node_certs.results[1].stat.exists|default(false)) or
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index fb619bdb0..eb97a824d 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -38,7 +38,7 @@
- gen_certs|default(false)
- inventory_hostname == groups['etcd'][0]
-- name: Gen_certs | run cert generation script
+- name: Gen_certs | run cert generation script for etcd and kube control plane nodes
command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
environment:
- MASTERS: "{% for m in groups['etcd'] %}
@@ -46,7 +46,7 @@
{{ m }}
{% endif %}
{% endfor %}"
- - HOSTS: "{% for h in (groups['k8s_cluster'] + groups['calico_rr']|default([]))|unique %}
+ - HOSTS: "{% for h in groups['kube_control_plane'] %}
{% if gen_node_certs[h] %}
{{ h }}
{% endif %}
@@ -56,7 +56,23 @@
when: gen_certs|default(false)
notify: set etcd_secret_changed
-- name: Gen_certs | Gather etcd member and admin certs from first etcd node
+- name: Gen_certs | run cert generation script for all clients
+ command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
+ environment:
+ - HOSTS: "{% for h in groups['k8s_cluster'] %}
+ {% if gen_node_certs[h] %}
+ {{ h }}
+ {% endif %}
+ {% endfor %}"
+ run_once: yes
+ delegate_to: "{{ groups['etcd'][0] }}"
+ when:
+ - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+ - kube_network_plugin != "calico" or calico_datastore == "etcd"
+ - gen_certs|default(false)
+ notify: set etcd_secret_changed
+
+- name: Gen_certs | Gather etcd member/admin and kube_control_plane client certs from first etcd node
slurp:
src: "{{ item }}"
register: etcd_master_certs
@@ -69,6 +85,10 @@
'{{ etcd_cert_dir }}/member-{{ node }}.pem',
'{{ etcd_cert_dir }}/member-{{ node }}-key.pem',
{% endfor %}]"
+ - "[{% for node in (groups['kube_control_plane']) %}
+ '{{ etcd_cert_dir }}/node-{{ node }}.pem',
+ '{{ etcd_cert_dir }}/node-{{ node }}-key.pem',
+ {% endfor %}]"
delegate_to: "{{ groups['etcd'][0] }}"
when:
- inventory_hostname in groups['etcd']
@@ -76,7 +96,7 @@
- inventory_hostname != groups['etcd'][0]
notify: set etcd_secret_changed
-- name: Gen_certs | Write etcd member and admin certs to other etcd nodes
+- name: Gen_certs | Write etcd member/admin and kube_control_plane client certs to other etcd nodes
copy:
dest: "{{ item.item }}"
content: "{{ item.content | b64decode }}"
@@ -96,7 +116,7 @@
src: "{{ item }}"
register: etcd_master_node_certs
with_items:
- - "[{% for node in (groups['k8s_cluster'] + groups['calico_rr']|default([]))|unique %}
+ - "[{% for node in groups['k8s_cluster'] %}
'{{ etcd_cert_dir }}/node-{{ node }}.pem',
'{{ etcd_cert_dir }}/node-{{ node }}-key.pem',
{% endfor %}]"
@@ -104,6 +124,8 @@
when:
- inventory_hostname in groups['etcd']
- inventory_hostname != groups['etcd'][0]
+ - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+ - kube_network_plugin != "calico" or calico_datastore == "etcd"
notify: set etcd_secret_changed
- name: Gen_certs | Write node certs to other etcd nodes
@@ -117,47 +139,21 @@
when:
- inventory_hostname in groups['etcd']
- inventory_hostname != groups['etcd'][0]
+ - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+ - kube_network_plugin != "calico" or calico_datastore == "etcd"
loop_control:
label: "{{ item.item }}"
-- name: Gen_certs | Set cert names per node
- set_fact:
- my_etcd_node_certs: [ 'ca.pem',
- 'node-{{ inventory_hostname }}.pem',
- 'node-{{ inventory_hostname }}-key.pem']
- tags:
- - facts
-
-- name: "Check_certs | Set 'sync_certs' to true on nodes"
- set_fact:
- sync_certs: true
- when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
- inventory_hostname in groups['k8s_cluster']) and
- inventory_hostname not in groups['etcd']
- with_items:
- - "{{ my_etcd_node_certs }}"
-
-- name: Gen_certs | Gather node certs
- shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs|join(' ') }} | base64 --wrap=0"
- args:
- executable: /bin/bash
- warn: false
- no_log: "{{ not (unsafe_show_logs|bool) }}"
- register: etcd_node_certs
- check_mode: no
- delegate_to: "{{ groups['etcd'][0] }}"
- when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
- inventory_hostname in groups['k8s_cluster']) and
+- include_tasks: gen_nodes_certs_script.yml
+ when:
+ - inventory_hostname in groups['kube_control_plane'] and
sync_certs|default(false) and inventory_hostname not in groups['etcd']
-- name: Gen_certs | Copy certs on nodes
- shell: "set -o pipefail && base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}"
- args:
- executable: /bin/bash
- no_log: "{{ not (unsafe_show_logs|bool) }}"
- changed_when: false
- when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
- inventory_hostname in groups['k8s_cluster']) and
+- include_tasks: gen_nodes_certs_script.yml
+ when:
+ - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+ - kube_network_plugin != "calico" or calico_datastore == "etcd"
+ - inventory_hostname in groups['k8s_cluster'] and
sync_certs|default(false) and inventory_hostname not in groups['etcd']
- name: Gen_certs | check certificate permissions
diff --git a/roles/etcd/tasks/gen_nodes_certs_script.yml b/roles/etcd/tasks/gen_nodes_certs_script.yml
new file mode 100644
index 000000000..d176e01aa
--- /dev/null
+++ b/roles/etcd/tasks/gen_nodes_certs_script.yml
@@ -0,0 +1,32 @@
+---
+- name: Gen_certs | Set cert names per node
+ set_fact:
+ my_etcd_node_certs: [ 'ca.pem',
+ 'node-{{ inventory_hostname }}.pem',
+ 'node-{{ inventory_hostname }}-key.pem']
+ tags:
+ - facts
+
+- name: "Check_certs | Set 'sync_certs' to true on nodes"
+ set_fact:
+ sync_certs: true
+ with_items:
+ - "{{ my_etcd_node_certs }}"
+
+- name: Gen_certs | Gather node certs
+ shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs|join(' ') }} | base64 --wrap=0"
+ args:
+ executable: /bin/bash
+ warn: false
+ no_log: "{{ not (unsafe_show_logs|bool) }}"
+ register: etcd_node_certs
+ check_mode: no
+ delegate_to: "{{ groups['etcd'][0] }}"
+ changed_when: false
+
+- name: Gen_certs | Copy certs on nodes
+ shell: "set -o pipefail && base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}"
+ args:
+ executable: /bin/bash
+ no_log: "{{ not (unsafe_show_logs|bool) }}"
+ changed_when: false
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 465be73fa..fb593dbae 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -12,6 +12,16 @@
- etcd-secrets
- include_tasks: upd_ca_trust.yml
+ when:
+ - inventory_hostname in groups['etcd']|union(groups['kube_control_plane'])|unique|sort
+ tags:
+ - etcd-secrets
+
+- include_tasks: upd_ca_trust.yml
+ when:
+ - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+ - kube_network_plugin != "calico" or calico_datastore == "etcd"
+ - inventory_hostname in groups['k8s_cluster']
tags:
- etcd-secrets
@@ -21,7 +31,9 @@
changed_when: false
check_mode: no
when:
- - inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort
+ - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+ - kube_network_plugin != "calico" or calico_datastore == "etcd"
+ - inventory_hostname in groups['k8s_cluster']
tags:
- master
- network
@@ -30,7 +42,9 @@
set_fact:
etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}"
when:
- - inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort
+ - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+ - kube_network_plugin != "calico" or calico_datastore == "etcd"
+ - inventory_hostname in groups['k8s_cluster']
tags:
- master
- network
diff --git a/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml b/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml
index 539701157..b94509f45 100644
--- a/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml
+++ b/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml
@@ -33,7 +33,7 @@
{{ primaryClusterIP }}
{%- endif -%}
upstreamForwardTarget: >-
- {%- if resolvconf_mode == 'host_resolvconf' and upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%}
+ {%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%}
{{ upstream_dns_servers|join(' ') }}
{%- else -%}
/etc/resolv.conf
@@ -61,7 +61,7 @@
{{ primaryClusterIP }}
{%- endif -%}
upstreamForwardTarget: >-
- {%- if resolvconf_mode == 'host_resolvconf' and upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%}
+ {%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%}
{{ upstream_dns_servers|join(' ') }}
{%- else -%}
/etc/resolv.conf
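After this change nodelocaldns forwards external names to `upstream_dns_servers` whenever the list is defined and non-empty, regardless of `resolvconf_mode`; only an empty or undefined list falls back to `/etc/resolv.conf`. A sketch of the inventory override (addresses are placeholders):

```yaml
# group_vars/k8s_cluster/k8s-cluster.yml — sketch
upstream_dns_servers:
  - 8.8.8.8
  - 1.1.1.1
```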
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
index 567824719..1ee1601d4 100644
--- a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
@@ -13,6 +13,11 @@ data:
{{ block['zones'] | join(' ') }} {
log
errors
+{% if block['rewrite'] is defined and block['rewrite']|length > 0 %}
+{% for rewrite_match in block['rewrite'] %}
+ rewrite {{ rewrite_match }}
+{% endfor %}
+{% endif %}
forward . {{ block['nameservers'] | join(' ') }}
loadbalance
cache {{ block['cache'] | default(5) }}
@@ -44,10 +49,12 @@ data:
{% if upstream_dns_servers is defined and upstream_dns_servers|length > 0 %}
forward . {{ upstream_dns_servers|join(' ') }} {
prefer_udp
+ max_concurrent 1000
}
{% else %}
forward . /etc/resolv.conf {
prefer_udp
+ max_concurrent 1000
}
{% endif %}
{% if enable_coredns_k8s_external %}
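The new `rewrite` key is read per custom zone block; a hedged sketch of such a block, assuming the blocks are supplied via `coredns_external_zones` as elsewhere in kubespray (zone names and targets are placeholders):

```yaml
coredns_external_zones:
  - zones:
      - example.internal
    nameservers:
      - 10.0.0.2
    cache: 5
    rewrite:
      - name website.example.internal website.default.svc.cluster.local
```

The same `rewrite` handling is added to the nodelocaldns config template further down.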
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
index bd36af8d0..edda5c5b2 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
@@ -32,8 +32,14 @@ spec:
cpu: {{ netchecker_server_cpu_requests }}
memory: {{ netchecker_server_memory_requests }}
securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop: ['ALL']
runAsUser: {{ netchecker_server_user | default('0') }}
runAsGroup: {{ netchecker_server_group | default('0') }}
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
ports:
- containerPort: 8081
args:
@@ -63,8 +69,14 @@ spec:
cpu: {{ netchecker_etcd_cpu_requests }}
memory: {{ netchecker_etcd_memory_requests }}
securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop: ['ALL']
runAsUser: {{ netchecker_server_user | default('0') }}
runAsGroup: {{ netchecker_server_group | default('0') }}
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
tolerations:
- effect: NoSchedule
operator: Exists
diff --git a/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2
index 5f9027caa..9ea695c48 100644
--- a/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2
@@ -14,6 +14,11 @@ data:
errors
cache {{ block['cache'] | default(30) }}
reload
+{% if block['rewrite'] is defined and block['rewrite']|length > 0 %}
+{% for rewrite_match in block['rewrite'] %}
+ rewrite {{ rewrite_match }}
+{% endfor %}
+{% endif %}
loop
bind {{ nodelocaldns_ip }}
forward . {{ block['nameservers'] | join(' ') }}
diff --git a/roles/kubernetes-apps/argocd/defaults/main.yml b/roles/kubernetes-apps/argocd/defaults/main.yml
index 7b79ef3e6..bb8f1fe9c 100644
--- a/roles/kubernetes-apps/argocd/defaults/main.yml
+++ b/roles/kubernetes-apps/argocd/defaults/main.yml
@@ -1,5 +1,5 @@
---
argocd_enabled: false
-argocd_version: v2.4.7
+argocd_version: v2.4.15
argocd_namespace: argocd
# argocd_admin_password:
diff --git a/roles/kubernetes-apps/argocd/tasks/main.yml b/roles/kubernetes-apps/argocd/tasks/main.yml
index 746ebbf2d..4770d87ea 100644
--- a/roles/kubernetes-apps/argocd/tasks/main.yml
+++ b/roles/kubernetes-apps/argocd/tasks/main.yml
@@ -2,7 +2,7 @@
- name: Kubernetes Apps | Install yq
become: yes
get_url:
- url: "https://github.com/mikefarah/yq/releases/download/v4.25.3/yq_linux_amd64"
+ url: "https://github.com/mikefarah/yq/releases/download/v4.27.5/yq_linux_{{ host_architecture }}"
dest: "{{ bin_dir }}/yq"
mode: '0755'
diff --git a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2 b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2
index d0a86bd9f..3cdf9bb94 100644
--- a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2
@@ -80,9 +80,6 @@ spec:
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- - name: pods-cloud-data
- mountPath: /var/lib/cloud/data
- readOnly: true
- name: pods-probe-dir
mountPath: /dev
mountPropagation: "HostToContainer"
@@ -110,10 +107,6 @@ spec:
hostPath:
path: /var/lib/kubelet
type: Directory
- - name: pods-cloud-data
- hostPath:
- path: /var/lib/cloud/data
- type: Directory
- name: pods-probe-dir
hostPath:
path: /dev
diff --git a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-poddisruptionbudget.yml.j2 b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-poddisruptionbudget.yml.j2
index 615721ff0..391d3b33a 100644
--- a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-poddisruptionbudget.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-poddisruptionbudget.yml.j2
@@ -1,4 +1,4 @@
-apiVersion: policy/v1beta1
+apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: cinder-csi-pdb
diff --git a/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml b/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml
index 01541ec1d..657b3006d 100644
--- a/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml
+++ b/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml
@@ -3,8 +3,14 @@ upcloud_csi_controller_replicas: 1
upcloud_csi_provisioner_image_tag: "v3.1.0"
upcloud_csi_attacher_image_tag: "v3.4.0"
upcloud_csi_resizer_image_tag: "v1.4.0"
-upcloud_csi_plugin_image_tag: "v0.2.1"
+upcloud_csi_plugin_image_tag: "v0.3.3"
upcloud_csi_node_image_tag: "v2.5.0"
upcloud_username: "{{ lookup('env','UPCLOUD_USERNAME') }}"
upcloud_password: "{{ lookup('env','UPCLOUD_PASSWORD') }}"
-upcloud_tolerations: []
\ No newline at end of file
+upcloud_tolerations: []
+upcloud_csi_enable_volume_snapshot: false
+upcloud_csi_snapshot_controller_replicas: 2
+upcloud_csi_snapshotter_image_tag: "v4.2.1"
+upcloud_csi_snapshot_controller_image_tag: "v4.2.1"
+upcloud_csi_snapshot_validation_webhook_image_tag: "v4.2.1"
+upcloud_cacert: "{{ lookup('env','OS_CACERT') }}"
\ No newline at end of file
diff --git a/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml b/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml
index 63e37bcf5..f37daba92 100644
--- a/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml
@@ -37,4 +37,4 @@
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
- label: "{{ item.item.file }}"
+ label: "{{ item.item.file }}"
\ No newline at end of file
diff --git a/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-controller.yml.j2 b/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-controller.yml.j2
index 8f05f77fb..0d52837a9 100644
--- a/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-controller.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-controller.yml.j2
@@ -23,7 +23,7 @@ spec:
args:
- "--csi-address=$(ADDRESS)"
- "--v=5"
- - "--timeout=60s"
+ - "--timeout=600s"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
@@ -36,7 +36,7 @@ spec:
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
- - "--timeout=30s"
+ - "--timeout=120s"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
@@ -48,7 +48,7 @@ spec:
image: registry.k8s.io/sig-storage/csi-resizer:{{ upcloud_csi_resizer_image_tag }}
args:
- "--v=5"
- - "--timeout=45s"
+ - "--timeout=120s"
- "--csi-address=$(ADDRESS)"
- "--handle-volume-inuse-error=true"
env:
@@ -68,8 +68,6 @@ spec:
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
- - name: UPCLOUD_API_URL
- value: https://api.upcloud.com/
- name: UPCLOUD_USERNAME
valueFrom:
secretKeyRef:
@@ -92,4 +90,4 @@ spec:
- name: regcred
volumes:
- name: socket-dir
- emptyDir: { }
\ No newline at end of file
+ emptyDir: {}
\ No newline at end of file
diff --git a/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-node.yml.j2 b/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-node.yml.j2
index 269f4cf54..7173c6baf 100644
--- a/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-node.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-node.yml.j2
@@ -23,15 +23,6 @@ spec:
- "--v=5"
- "--csi-address=$(ADDRESS)"
- "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
- lifecycle:
- preStop:
- exec:
- command:
- [
- "/bin/sh",
- "-c",
- "rm -rf /registration/storage.csi.upcloud.com /registration/storage.csi.upcloud.com-reg.sock",
- ]
env:
- name: ADDRESS
value: /csi/csi.sock
@@ -56,8 +47,6 @@ spec:
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- - name: UPCLOUD_API_URL
- value: https://api.upcloud.com/
- name: UPCLOUD_USERNAME
valueFrom:
secretKeyRef:
@@ -76,7 +65,7 @@ spec:
securityContext:
privileged: true
capabilities:
- add: [ "SYS_ADMIN" ]
+ add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
volumeMounts:
- name: plugin-dir
diff --git a/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-setup.yml.j2 b/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-setup.yml.j2
index 2a9ec08be..3bc0bd580 100644
--- a/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-setup.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-setup.yml.j2
@@ -5,6 +5,40 @@ metadata:
namespace: kube-system
---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: csi-upcloud-node-sa
+ namespace: kube-system
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-upcloud-node-driver-registrar-role
+ namespace: kube-system
+rules:
+ - apiGroups: [ "" ]
+ resources: [ "events" ]
+ verbs: [ "get", "list", "watch", "create", "update", "patch" ]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-upcloud-node-driver-registrar-binding
+subjects:
+ - kind: ServiceAccount
+ name: csi-upcloud-node-sa
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+ name: csi-upcloud-node-driver-registrar-role
+ apiGroup: rbac.authorization.k8s.io
+
+---
+
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@@ -28,12 +62,6 @@ rules:
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "list", "watch", "create", "update", "patch" ]
- - apiGroups: [ "snapshot.storage.k8s.io" ]
- resources: [ "volumesnapshots" ]
- verbs: [ "get", "list" ]
- - apiGroups: [ "snapshot.storage.k8s.io" ]
- resources: [ "volumesnapshotcontents" ]
- verbs: [ "get", "list" ]
- apiGroups: [ "" ]
resources: [ "nodes" ]
verbs: [ "get", "list", "watch" ]
@@ -90,87 +118,39 @@ roleRef:
apiGroup: rbac.authorization.k8s.io
---
-kind: ClusterRole
+# Provisioner must be able to work with endpoints and leases in the current namespace
+# if (and only if) leadership election is enabled
+kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
- name: csi-upcloud-snapshotter-role
+ namespace: kube-system
+ name: csi-upcloud-provisioner-cfg-role
rules:
- - apiGroups: [ "" ]
- resources: [ "persistentvolumes" ]
- verbs: [ "get", "list", "watch" ]
- - apiGroups: [ "" ]
- resources: [ "persistentvolumeclaims" ]
- verbs: [ "get", "list", "watch" ]
- - apiGroups: [ "storage.k8s.io" ]
- resources: [ "storageclasses" ]
- verbs: [ "get", "list", "watch" ]
- - apiGroups: [ "" ]
- resources: [ "events" ]
- verbs: [ "list", "watch", "create", "update", "patch" ]
- - apiGroups: [ "" ]
- resources: [ "secrets" ]
- verbs: [ "get", "list" ]
- - apiGroups: [ "snapshot.storage.k8s.io" ]
- resources: [ "volumesnapshotclasses" ]
- verbs: [ "get", "list", "watch" ]
- - apiGroups: [ "snapshot.storage.k8s.io" ]
- resources: [ "volumesnapshotcontents" ]
- verbs: [ "create", "get", "list", "watch", "update", "delete" ]
- - apiGroups: [ "snapshot.storage.k8s.io" ]
- resources: [ "volumesnapshots" ]
- verbs: [ "get", "list", "watch", "update" ]
- - apiGroups: [ "apiextensions.k8s.io" ]
- resources: [ "customresourcedefinitions" ]
- verbs: [ "create", "list", "watch", "delete" ]
+- apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get", "watch", "list", "delete", "update", "create"]
+- apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["get", "watch", "list", "delete", "update", "create"]
---
-kind: ClusterRoleBinding
+kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
- name: csi-upcloud-snapshotter-binding
+ name: csi-provisioner-role-cfg-binding
+ namespace: kube-system
subjects:
- kind: ServiceAccount
name: csi-upcloud-controller-sa
namespace: kube-system
roleRef:
- kind: ClusterRole
- name: csi-upcloud-snapshotter-role
+ kind: Role
+ name: csi-upcloud-provisioner-cfg-role
apiGroup: rbac.authorization.k8s.io
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: csi-upcloud-node-sa
- namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: csi-upcloud-node-driver-registrar-role
- namespace: kube-system
-rules:
- - apiGroups: [ "" ]
- resources: [ "events" ]
- verbs: [ "get", "list", "watch", "create", "update", "patch" ]
-
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: csi-upcloud-node-driver-registrar-binding
-subjects:
- - kind: ServiceAccount
- name: csi-upcloud-node-sa
- namespace: kube-system
-roleRef:
- kind: ClusterRole
- name: csi-upcloud-node-driver-registrar-role
- apiGroup: rbac.authorization.k8s.io
----
-# Resizer must be able to work with PVCs, PVs, SCs.
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-upcloud-resizer-role
rules:
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml
index 93beca307..84e78c3f1 100644
--- a/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml
+++ b/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml
@@ -14,6 +14,9 @@ vsphere_csi_node_driver_registrar_image_tag: "v2.5.0"
vsphere_csi_driver_image_tag: "v2.5.1"
vsphere_csi_resizer_tag: "v1.4.0"
+# Set to kube-system for backward compatibility; should be changed to vmware-system-csi in the long run
+vsphere_csi_namespace: "kube-system"
+
vsphere_csi_controller_replicas: 1
csi_endpoint: '{% if external_vsphere_version >= "7.0u1" %}/csi{% else %}/var/lib/csi/sockets/pluginproxy{% endif %}'
@@ -22,6 +25,8 @@ vsphere_csi_aggressive_node_drain: False
vsphere_csi_aggressive_node_unreachable_timeout: 300
vsphere_csi_aggressive_node_not_ready_timeout: 300
+vsphere_csi_node_affinity: {}
+
# If this is true, debug information will be displayed but
# may contain some private data, so it is recommended to set it to false
# in the production environment.
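A sketch of how the two new knobs might be set in inventory; the keys come from this diff, while the affinity body is an illustrative assumption that gets rendered verbatim into the node DaemonSet via `to_nice_yaml`:

```yaml
vsphere_csi_namespace: vmware-system-csi   # default stays kube-system for in-place upgrades
vsphere_csi_node_affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: node-role.kubernetes.io/control-plane
              operator: DoesNotExist
```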
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
index c2cf62ab9..5983fa095 100644
--- a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
@@ -16,6 +16,7 @@
dest: "{{ kube_config_dir }}/{{ item }}"
mode: 0644
with_items:
+ - vsphere-csi-namespace.yml
- vsphere-csi-driver.yml
- vsphere-csi-controller-rbac.yml
- vsphere-csi-node-rbac.yml
@@ -27,7 +28,7 @@
when: inventory_hostname == groups['kube_control_plane'][0]
- name: vSphere CSI Driver | Generate a CSI secret manifest
- command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml"
+ command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n {{ vsphere_csi_namespace }} --dry-run --save-config -o yaml"
register: vsphere_csi_secret_manifest
when: inventory_hostname == groups['kube_control_plane'][0]
no_log: "{{ not (unsafe_show_logs|bool) }}"
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2 b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2
index 56a28d0b3..3e16ae1b0 100644
--- a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2
@@ -21,4 +21,4 @@ data:
kind: ConfigMap
metadata:
name: internal-feature-states.csi.vsphere.vmware.com
- namespace: kube-system
+ namespace: "{{ vsphere_csi_namespace }}"
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-deployment.yml.j2 b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-deployment.yml.j2
index 97be182c2..1c1de2f8f 100644
--- a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-deployment.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-deployment.yml.j2
@@ -2,7 +2,7 @@ kind: Deployment
apiVersion: apps/v1
metadata:
name: vsphere-csi-controller
- namespace: kube-system
+ namespace: "{{ vsphere_csi_namespace }}"
spec:
replicas: {{ vsphere_csi_controller_replicas }}
strategy:
@@ -90,8 +90,8 @@ spec:
image: {{ gcr_image_repo }}/cloud-provider-vsphere/csi/release/driver:{{ vsphere_csi_controller }}
args:
- "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
- - "--fss-namespace=kube-system"
- - "--supervisor-fss-namespace=kube-system"
+ - "--fss-namespace={{ vsphere_csi_namespace }}"
+ - "--supervisor-fss-namespace={{ vsphere_csi_namespace }}"
- "--use-gocsi=false"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
@@ -150,8 +150,8 @@ spec:
args:
- "--leader-election"
- "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
- - "--fss-namespace=kube-system"
- - "--supervisor-fss-namespace=kube-system"
+ - "--fss-namespace={{ vsphere_csi_namespace }}"
+ - "--supervisor-fss-namespace={{ vsphere_csi_namespace }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
ports:
- containerPort: 2113
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2 b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2
index 80797063a..fd614f9a4 100644
--- a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2
@@ -2,7 +2,7 @@ kind: ServiceAccount
apiVersion: v1
metadata:
name: vsphere-csi-controller
- namespace: kube-system
+ namespace: "{{ vsphere_csi_namespace }}"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
@@ -79,7 +79,7 @@ metadata:
subjects:
- kind: ServiceAccount
name: vsphere-csi-controller
- namespace: kube-system
+ namespace: "{{ vsphere_csi_namespace }}"
roleRef:
kind: ClusterRole
name: vsphere-csi-controller-role
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-service.yml.j2 b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-service.yml.j2
index ccded9b72..75967ba5d 100644
--- a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-service.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-service.yml.j2
@@ -2,7 +2,7 @@ apiVersion: v1
kind: Service
metadata:
name: vsphere-csi-controller
- namespace: kube-system
+ namespace: "{{ vsphere_csi_namespace }}"
labels:
app: vsphere-csi-controller
spec:
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-namespace.yml.j2 b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-namespace.yml.j2
new file mode 100644
index 000000000..0a28bda12
--- /dev/null
+++ b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-namespace.yml.j2
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: "{{ vsphere_csi_namespace }}"
\ No newline at end of file
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node-rbac.yml.j2 b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node-rbac.yml.j2
index c4c172d60..42896e140 100644
--- a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node-rbac.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node-rbac.yml.j2
@@ -3,7 +3,7 @@ kind: ServiceAccount
apiVersion: v1
metadata:
name: vsphere-csi-node
- namespace: kube-system
+ namespace: "{{ vsphere_csi_namespace }}"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
@@ -24,7 +24,7 @@ metadata:
subjects:
- kind: ServiceAccount
name: vsphere-csi-node
- namespace: kube-system
+ namespace: "{{ vsphere_csi_namespace }}"
roleRef:
kind: ClusterRole
name: vsphere-csi-node-cluster-role
@@ -34,7 +34,7 @@ kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: vsphere-csi-node-role
- namespace: kube-system
+ namespace: "{{ vsphere_csi_namespace }}"
rules:
- apiGroups: [""]
resources: ["configmaps"]
@@ -44,11 +44,11 @@ kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: vsphere-csi-node-binding
- namespace: kube-system
+ namespace: "{{ vsphere_csi_namespace }}"
subjects:
- kind: ServiceAccount
name: vsphere-csi-node
- namespace: kube-system
+ namespace: "{{ vsphere_csi_namespace }}"
roleRef:
kind: Role
name: vsphere-csi-node-role
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node.yml.j2 b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node.yml.j2
index 322267199..1a8370ddd 100644
--- a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node.yml.j2
@@ -2,7 +2,7 @@ kind: DaemonSet
apiVersion: apps/v1
metadata:
name: vsphere-csi-node
- namespace: kube-system
+ namespace: "{{ vsphere_csi_namespace }}"
spec:
selector:
matchLabels:
@@ -19,6 +19,10 @@ spec:
spec:
nodeSelector:
kubernetes.io/os: linux
+{% if vsphere_csi_node_affinity %}
+ affinity:
+ {{ vsphere_csi_node_affinity | to_nice_yaml | indent(width=8) }}
+{% endif %}
serviceAccountName: vsphere-csi-node
hostNetwork: true
dnsPolicy: "ClusterFirstWithHostNet"
@@ -57,8 +61,8 @@ spec:
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
- - "--fss-namespace=kube-system"
- - "--supervisor-fss-namespace=kube-system"
+ - "--fss-namespace={{ vsphere_csi_namespace }}"
+ - "--supervisor-fss-namespace={{ vsphere_csi_namespace }}"
- "--use-gocsi=false"
imagePullPolicy: "Always"
env:
diff --git a/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml b/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml
index 8cf86cf28..fbac89839 100644
--- a/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml
+++ b/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml
@@ -21,4 +21,4 @@ external_openstack_cacert: "{{ lookup('env','OS_CACERT') }}"
## arg1: "value1"
## arg2: "value2"
external_openstack_cloud_controller_extra_args: {}
-external_openstack_cloud_controller_image_tag: "v1.22.0"
+external_openstack_cloud_controller_image_tag: "v1.23.4"
diff --git a/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config.j2 b/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config.j2
index 46e4c6e6d..adb08ae99 100644
--- a/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config.j2
+++ b/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-config.j2
@@ -65,6 +65,12 @@ use-octavia=true
{% if external_openstack_enable_ingress_hostname is defined %}
enable-ingress-hostname={{ external_openstack_enable_ingress_hostname | bool }}
{% endif %}
+{% if external_openstack_ingress_hostname_suffix is defined %}
+ingress-hostname-suffix={{ external_openstack_ingress_hostname_suffix | string | lower }}
+{% endif %}
+{% if external_openstack_max_shared_lb is defined %}
+max-shared-lb={{ external_openstack_max_shared_lb }}
+{% endif %}
[Networking]
ipv6-support-disabled={{ external_openstack_network_ipv6_disabled | string | lower }}
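Both new options render into the `[LoadBalancer]` section only when defined, so existing cloud-configs are unaffected. An inventory sketch with placeholder values:

```yaml
external_openstack_enable_ingress_hostname: true
external_openstack_ingress_hostname_suffix: "nip.io"
external_openstack_max_shared_lb: 2
```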
diff --git a/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-roles.yml.j2 b/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-roles.yml.j2
index 9f900759f..2ab3a5bfa 100644
--- a/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-roles.yml.j2
+++ b/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-roles.yml.j2
@@ -93,5 +93,17 @@ items:
- list
- get
- watch
+ - apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+ - apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
kind: List
metadata: {}
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index 107c22fb6..fee247b98 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -5,14 +5,11 @@
download: "{{ download_defaults | combine(downloads.helm) }}"
- name: Helm | Copy helm binary from download dir
- synchronize:
+ copy:
src: "{{ local_release_dir }}/helm-{{ helm_version }}/linux-{{ image_arch }}/helm"
dest: "{{ bin_dir }}/helm"
- compress: no
- perms: yes
- owner: no
- group: no
- delegate_to: "{{ inventory_hostname }}"
+ mode: 0755
+ remote_src: true
- name: Helm | Get helm completion
command: "{{ bin_dir }}/helm completion bash"
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.yml.j2
index 6ac4f0b8f..39fad4f9a 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager.yml.j2
@@ -870,6 +870,11 @@ spec:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
+ capabilities:
+ drop: ['ALL']
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
{% if cert_manager_tolerations %}
tolerations:
{{ cert_manager_tolerations | to_nice_yaml(indent=2) | indent(width=8) }}
@@ -944,6 +949,11 @@ spec:
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
+ capabilities:
+ drop: ['ALL']
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
env:
- name: POD_NAMESPACE
valueFrom:
@@ -1040,6 +1050,11 @@ spec:
failureThreshold: 3
securityContext:
allowPrivilegeEscalation: false
+ capabilities:
+ drop: ['ALL']
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
env:
- name: POD_NAMESPACE
valueFrom:
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml
index 21ea68c9d..10cf1a7fc 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml
@@ -16,3 +16,5 @@ ingress_nginx_termination_grace_period_seconds: 300
# ingress_nginx_class: nginx
ingress_nginx_webhook_enabled: false
ingress_nginx_webhook_job_ttl: 1800
+
+ingress_nginx_probe_initial_delay_seconds: 10
\ No newline at end of file
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-ingress-nginx.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-ingress-nginx.yml.j2
index 80d25df5a..767502eae 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-ingress-nginx.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-ingress-nginx.yml.j2
@@ -31,3 +31,6 @@ rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["list", "watch"]
+ - apiGroups: ["discovery.k8s.io"]
+ resources: ["endpointslices"]
+ verbs: ["get", "list", "watch"]
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2
index dcec79bf3..6ab424983 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2
@@ -108,12 +108,11 @@ spec:
protocol: TCP
{% endif %}
livenessProbe:
- failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
- initialDelaySeconds: 10
+ initialDelaySeconds: {{ ingress_nginx_probe_initial_delay_seconds }}
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
@@ -123,7 +122,7 @@ spec:
path: /healthz
port: 10254
scheme: HTTP
- initialDelaySeconds: 10
+ initialDelaySeconds: {{ ingress_nginx_probe_initial_delay_seconds }}
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
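Both probes now share a single knob, so a one-line inventory override covers liveness and readiness alike (value illustrative):

```yaml
ingress_nginx_probe_initial_delay_seconds: 30   # default 10; raise for slow-starting nodes
```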
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-admission-webhook.ym.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-admission-webhook.ym.j2
deleted file mode 100644
index 5d1bb0172..000000000
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-admission-webhook.ym.j2
+++ /dev/null
@@ -1,17 +0,0 @@
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
- labels:
- app.kubernetes.io/name: ingress-nginx
- app.kubernetes.io/part-of: ingress-nginx
- name: ingress-nginx-admission
- namespace: {{ ingress_nginx_namespace }}
-rules:
-- apiGroups:
- - ""
- resources:
- - secrets
- verbs:
- - get
- - create
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-ingress-nginx.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-ingress-nginx.yml.j2
index ee81b3338..58c0488f8 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-ingress-nginx.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-ingress-nginx.yml.j2
@@ -63,3 +63,6 @@ rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create"]
+ - apiGroups: ["discovery.k8s.io"]
+ resources: ["endpointslices"]
+ verbs: ["get", "list", "watch"]
diff --git a/roles/kubernetes-apps/metallb/defaults/main.yml b/roles/kubernetes-apps/metallb/defaults/main.yml
index 3679aebe8..dc96fdc7d 100644
--- a/roles/kubernetes-apps/metallb/defaults/main.yml
+++ b/roles/kubernetes-apps/metallb/defaults/main.yml
@@ -5,7 +5,7 @@ metallb_protocol: "layer2"
metallb_port: "7472"
metallb_memberlist_port: "7946"
metallb_peers: []
-metallb_speaker_enabled: true
+metallb_speaker_enabled: "{{ metallb_enabled }}"
metallb_speaker_nodeselector:
kubernetes.io/os: "linux"
metallb_controller_nodeselector:
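The speaker now follows `metallb_enabled` by default but can still be switched off on its own, e.g. to deploy only the controller; sketch:

```yaml
metallb_enabled: true
metallb_speaker_enabled: false   # controller only; announcements handled outside the cluster
```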
diff --git a/roles/kubernetes-apps/persistent_volumes/upcloud-csi/defaults/main.yml b/roles/kubernetes-apps/persistent_volumes/upcloud-csi/defaults/main.yml
index 7ca901ee1..5986e8cb0 100644
--- a/roles/kubernetes-apps/persistent_volumes/upcloud-csi/defaults/main.yml
+++ b/roles/kubernetes-apps/persistent_volumes/upcloud-csi/defaults/main.yml
@@ -1,7 +1,12 @@
---
-expand_persistent_volumes: true
-parameters:
- tier: maxiops
storage_classes:
- name: standard
is_default: true
+ expand_persistent_volumes: true
+ parameters:
+ tier: maxiops
+ - name: hdd
+ is_default: false
+ expand_persistent_volumes: true
+ parameters:
+ tier: hdd
diff --git a/roles/kubernetes-apps/persistent_volumes/upcloud-csi/templates/upcloud-csi-storage-class.yml.j2 b/roles/kubernetes-apps/persistent_volumes/upcloud-csi/templates/upcloud-csi-storage-class.yml.j2
index 058531a30..d5c84b544 100644
--- a/roles/kubernetes-apps/persistent_volumes/upcloud-csi/templates/upcloud-csi-storage-class.yml.j2
+++ b/roles/kubernetes-apps/persistent_volumes/upcloud-csi/templates/upcloud-csi-storage-class.yml.j2
@@ -7,9 +7,10 @@ metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}"
provisioner: storage.csi.upcloud.com
+reclaimPolicy: Retain
parameters:
{% for key, value in (class.parameters | default({})).items() %}
"{{ key }}": "{{ value }}"
{% endfor %}
-allowVolumeExpansion: {{ expand_persistent_volumes }}
+allowVolumeExpansion: {{ class.expand_persistent_volumes | default(true) | ternary("true","false") }}
{% endfor %}
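
With `expand_persistent_volumes` and `parameters` now scoped per storage class, heterogeneous classes can be declared in a single list. A sketch of extending the defaults shown above (the extra `archive` class is illustrative; only the `maxiops` and `hdd` tiers appear in this diff):

```yaml
storage_classes:
  - name: standard
    is_default: true
    expand_persistent_volumes: true
    parameters:
      tier: maxiops
  - name: archive                      # hypothetical extra class
    is_default: false
    expand_persistent_volumes: false   # rendered as allowVolumeExpansion: false
    parameters:
      tier: hdd
```
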
diff --git a/roles/kubernetes-apps/policy_controller/meta/main.yml b/roles/kubernetes-apps/policy_controller/meta/main.yml
index a1050cc19..3f46b8d58 100644
--- a/roles/kubernetes-apps/policy_controller/meta/main.yml
+++ b/roles/kubernetes-apps/policy_controller/meta/main.yml
@@ -2,15 +2,7 @@
dependencies:
- role: policy_controller/calico
when:
- - kube_network_plugin == 'calico'
+ - kube_network_plugin in ['calico', 'canal']
- enable_network_policy
- - calico_datastore != "kdd"
- tags:
- - policy-controller
-
- - role: policy_controller/calico
- when:
- - kube_network_plugin == 'canal'
- - calico_datastore != "kdd"
tags:
- policy-controller
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
index dc1352060..5f8c78445 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -150,6 +150,21 @@
- apiserver_sans_check.changed
- not kube_external_ca_mode
+- name: kubeadm | Create directory to store kubeadm patches
+ file:
+ path: "{{ kubeadm_patches.dest_dir }}"
+ state: directory
+ mode: 0640
+ when: kubeadm_patches is defined and kubeadm_patches.enabled
+
+- name: kubeadm | Copy kubeadm patches from inventory files
+ copy:
+ src: "{{ kubeadm_patches.source_dir }}/"
+ dest: "{{ kubeadm_patches.dest_dir }}"
+ owner: "root"
+ mode: 0644
+ when: kubeadm_patches is defined and kubeadm_patches.enabled
+
- name: kubeadm | Initialize first master
command: >-
timeout -k 300s 300s
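
The new tasks and the `patches:` stanzas in the kubeadm configs below assume a `kubeadm_patches` dictionary that is not defined in this diff. A minimal sketch of how it might be set in inventory, using only the keys referenced above (`enabled`, `source_dir`, `dest_dir`); the paths and file layout are assumptions:

```yaml
kubeadm_patches:
  enabled: true
  # Patch files kept on the Ansible controller, named per kubeadm's
  # patch convention, e.g. kube-apiserver+strategic.yaml (assumed layout)
  source_dir: "{{ inventory_dir }}/patches"
  # Directory on the target host that kubeadm's patches.directory points to
  dest_dir: /etc/kubernetes/patches
```
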
diff --git a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2
index d44a24849..9f4168cf6 100644
--- a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2
@@ -28,6 +28,10 @@ nodeRegistration:
kubeletExtraArgs:
cloud-provider: external
{% endif %}
+{% if kubeadm_patches is defined and kubeadm_patches.enabled %}
+patches:
+ directory: {{ kubeadm_patches.dest_dir }}
+{% endif %}
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
diff --git a/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2
index 7bf876c52..b41b2dbc7 100644
--- a/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2
@@ -26,3 +26,7 @@ nodeRegistration:
{% else %}
taints: []
{% endif %}
+{% if kubeadm_patches is defined and kubeadm_patches.enabled %}
+patches:
+ directory: {{ kubeadm_patches.dest_dir }}
+{% endif %}
\ No newline at end of file
diff --git a/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2 b/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2
index 39b0f0557..be41418d4 100644
--- a/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2
@@ -1,8 +1,4 @@
-{% if kube_version is version('v1.23.0', '<') %}
-{% set kubescheduler_config_api_version = "v1beta2" %}
-{% else %}
{% set kubescheduler_config_api_version = "v1beta3" %}
-{% endif %}
apiVersion: kubescheduler.config.k8s.io/{{ kubescheduler_config_api_version|d('v1') }}
kind: KubeSchedulerConfiguration
clientConnection:
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 13497ffbb..a3cc8620f 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -52,7 +52,7 @@
kubeadm_token: "{{ temp_token.stdout }}"
when: kubeadm_token is not defined
-- name: Set kubeadm api version to v1beta2
+- name: Set kubeadm api version to v1beta3
set_fact:
kubeadmConfig_api_version: v1beta3
@@ -64,6 +64,21 @@
mode: 0640
when: not is_kube_master
+- name: kubeadm | Create directory to store kubeadm patches
+ file:
+ path: "{{ kubeadm_patches.dest_dir }}"
+ state: directory
+ mode: 0640
+ when: kubeadm_patches is defined and kubeadm_patches.enabled
+
+- name: kubeadm | Copy kubeadm patches from inventory files
+ copy:
+ src: "{{ kubeadm_patches.source_dir }}/"
+ dest: "{{ kubeadm_patches.dest_dir }}"
+ owner: "root"
+ mode: 0644
+ when: kubeadm_patches is defined and kubeadm_patches.enabled
+
- name: Join to cluster if needed
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}:/sbin"
diff --git a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta3.j2 b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta3.j2
index f9b31dc73..64c3db99a 100644
--- a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta3.j2
+++ b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta3.j2
@@ -19,10 +19,14 @@ discovery:
tlsBootstrapToken: {{ kubeadm_token }}
caCertPath: {{ kube_cert_dir }}/ca.crt
nodeRegistration:
- name: {{ kube_override_hostname }}
+ name: '{{ kube_override_hostname }}'
criSocket: {{ cri_socket }}
{% if 'calico_rr' in group_names and 'kube_node' not in group_names %}
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/calico-rr
{% endif %}
+{% if kubeadm_patches is defined and kubeadm_patches.enabled %}
+patches:
+ directory: {{ kubeadm_patches.dest_dir }}
+{% endif %}
diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
index f1657c0e6..69a89ab4b 100644
--- a/roles/kubernetes/node/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -22,6 +22,15 @@ kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service"
### fail with swap on (default true)
kubelet_fail_swap_on: true
+# Set systemd service hardening features
+kubelet_systemd_hardening: false
+
+# List of secure IPs for kubelet
+kubelet_secure_addresses: >-
+ {%- for host in groups['kube_control_plane'] -%}
+ {{ hostvars[host]['ip'] | default(fallback_ips[host]) }}{{ ' ' if not loop.last else '' }}
+ {%- endfor -%}
+
# Reserve this space for kube resources
kube_memory_reserved: 256Mi
kube_cpu_reserved: 100m
@@ -68,7 +77,7 @@ kube_vip_bgp_routerid:
kube_vip_local_as: 65000
kube_vip_bgp_peeraddress:
kube_vip_bgp_peerpass:
-kube_vip_bgp_peeras:
+kube_vip_bgp_peeras: 65000
kube_vip_bgppeers:
kube_vip_address:
diff --git a/roles/kubernetes/node/tasks/facts.yml b/roles/kubernetes/node/tasks/facts.yml
index 32c01805c..97d52e8c3 100644
--- a/roles/kubernetes/node/tasks/facts.yml
+++ b/roles/kubernetes/node/tasks/facts.yml
@@ -13,7 +13,7 @@
- block:
- name: look up crio cgroup driver
- shell: "crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'"
+ shell: "{{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'"
register: crio_cgroup_driver_result
changed_when: false
diff --git a/roles/kubernetes/node/templates/kubelet.service.j2 b/roles/kubernetes/node/templates/kubelet.service.j2
index 38360c8df..feb837424 100644
--- a/roles/kubernetes/node/templates/kubelet.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.service.j2
@@ -24,6 +24,11 @@ ExecStart={{ bin_dir }}/kubelet \
$KUBELET_CLOUDPROVIDER
Restart=always
RestartSec=10s
+{% if kubelet_systemd_hardening %}
+# Hardening setup
+IPAddressDeny=any
+IPAddressAllow={{ kubelet_secure_addresses }}
+{% endif %}
[Install]
WantedBy=multi-user.target
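
Together with the new defaults above, the unit template gains an opt-in network allowlist. A sketch of enabling it while replacing the auto-generated control-plane list with trusted networks of your own (addresses are illustrative; systemd's IPAddressAllow= accepts both single addresses and CIDR prefixes):

```yaml
kubelet_systemd_hardening: true
# Space-separated, mirroring the generated default; CIDRs are accepted
kubelet_secure_addresses: "192.168.10.0/24 10.20.0.5"
```
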
diff --git a/roles/kubernetes/node/templates/manifests/kube-vip.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-vip.manifest.j2
index d6789b156..2ca073f52 100644
--- a/roles/kubernetes/node/templates/manifests/kube-vip.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/kube-vip.manifest.j2
@@ -13,7 +13,7 @@ spec:
- name: vip_arp
value: {{ kube_vip_arp_enabled | string | to_json }}
- name: port
- value: "6443"
+ value: {{ kube_apiserver_port | string | to_json }}
{% if kube_vip_interface %}
- name: vip_interface
value: {{ kube_vip_interface | string | to_json }}
@@ -60,10 +60,10 @@ spec:
- name: bgp_peerpass
value: {{ kube_vip_bgp_peerpass | to_json }}
- name: bgp_peeras
- value: {{ kube_vip_bgp_peeras | to_json }}
+ value: {{ kube_vip_bgp_peeras | string | to_json }}
{% if kube_vip_bgppeers %}
- name: bgp_peers
- value: {{ kube_vip_bgp_peeras | join(',') | to_json }}
+ value: {{ kube_vip_bgppeers | join(',') | to_json }}
{% endif %}
{% endif %}
- name: address
diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml
index d1cc42cee..5537b52b8 100644
--- a/roles/kubernetes/preinstall/defaults/main.yml
+++ b/roles/kubernetes/preinstall/defaults/main.yml
@@ -36,6 +36,8 @@ resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
# All inventory hostnames will be written into each /etc/hosts file.
populate_inventory_to_hosts_file: true
+# The K8s API FQDN will be written into the /etc/hosts file.
+populate_loadbalancer_apiserver_to_hosts_file: true
sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
@@ -100,3 +102,6 @@ ntp_tinker_panic: false
# Force time sync immediately after ntp is installed, which is useful in a newly installed system.
ntp_force_sync_immediately: false
+
+# Set the timezone for your servers, e.g. "Etc/UTC" or "Etc/GMT-8". If left empty, the timezone will not be changed.
+ntp_timezone: ""
diff --git a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml
index 70c6414c9..4c24b83a2 100644
--- a/roles/kubernetes/preinstall/handlers/main.yml
+++ b/roles/kubernetes/preinstall/handlers/main.yml
@@ -95,6 +95,10 @@
- name: Preinstall | restart kube-apiserver crio/containerd
shell: "{{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+ register: preinstall_restart_apiserver
+ retries: 10
+ until: preinstall_restart_apiserver.rc == 0
+ delay: 1
when:
- container_manager in ['crio', 'containerd']
- inventory_hostname in groups['kube_control_plane']
diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
index f9285811a..b7f9b2570 100644
--- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
@@ -100,6 +100,22 @@
- not ignore_assert_errors
- ip is defined
+- name: Ensure ping package
+ package:
+ name: >-
+ {%- if ansible_os_family in ['RedHat', 'Suse'] -%}
+ iputils
+ {%- else -%}
+ iputils-ping
+ {%- endif -%}
+ state: present
+ when:
+ - access_ip is defined
+ - not ignore_assert_errors
+ - ping_access_ip
+ - not is_fedora_coreos
+ - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
+
- name: Stop if access_ip is not pingable
command: ping -c1 {{ access_ip }}
when:
diff --git a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
index 10602ef3e..58e0685a2 100644
--- a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
@@ -6,14 +6,6 @@
tags:
- facts
-- name: Set os_family fact for Kylin Linux Advanced Server
- set_fact:
- ansible_os_family: "RedHat"
- ansible_distribution_major_version: "8"
- when: ansible_distribution == "Kylin Linux Advanced Server"
- tags:
- - facts
-
- name: check if booted with ostree
stat:
path: /run/ostree-booted
@@ -91,12 +83,17 @@
changed_when: false
check_mode: no
+- name: set default dns if remove_default_searchdomains is false
+ set_fact:
+ default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"]
+ when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0)
+
- name: set dns facts
set_fact:
resolvconf: >-
{%- if resolvconf.rc == 0 and resolvconfd_path.stat.isdir is defined and resolvconfd_path.stat.isdir -%}true{%- else -%}false{%- endif -%}
bogus_domains: |-
- {% for d in [ 'default.svc.' + dns_domain, 'svc.' + dns_domain ] + searchdomains|default([]) -%}
+ {% for d in default_searchdomains|default([]) + searchdomains|default([]) -%}
{{ dns_domain }}.{{ d }}./{{ d }}.{{ d }}./com.{{ d }}./
{%- endfor %}
cloud_resolver: "{{ ['169.254.169.254'] if cloud_provider is defined and cloud_provider == 'gce' else
@@ -169,11 +166,11 @@
- name: generate search domains to resolvconf
set_fact:
searchentries:
- search {{ ([ 'default.svc.' + dns_domain, 'svc.' + dns_domain ] + searchdomains|default([])) | join(' ') }}
+ search {{ (default_searchdomains|default([]) + searchdomains|default([])) | join(' ') }}
domainentry:
domain {{ dns_domain }}
supersede_search:
- supersede domain-search "{{ ([ 'default.svc.' + dns_domain, 'svc.' + dns_domain ] + searchdomains|default([])) | join('", "') }}";
+ supersede domain-search "{{ (default_searchdomains|default([]) + searchdomains|default([])) | join('", "') }}";
supersede_domain:
supersede domain-name "{{ dns_domain }}";
@@ -196,9 +193,9 @@
- name: generate nameservers for resolvconf, including cluster DNS
set_fact:
nameserverentries: |-
- {{ ( ( [nodelocaldns_ip] if enable_nodelocaldns else []) + coredns_server|d([]) + nameservers|d([]) + cloud_resolver|d([]) + configured_nameservers|d([])) | unique | join(',') }}
+ {{ (([nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server|d([]) if not enable_nodelocaldns else []) + nameservers|d([]) + cloud_resolver|d([]) + (configured_nameservers|d([]) if not disable_host_nameservers|d()|bool else [])) | unique | join(',') }}
supersede_nameserver:
- supersede domain-name-servers {{ ( coredns_server|d([]) + nameservers|d([]) + cloud_resolver|d([])) | unique | join(', ') }};
+ supersede domain-name-servers {{ ( ( [nodelocaldns_ip] if enable_nodelocaldns else []) + coredns_server|d([]) + nameservers|d([]) + cloud_resolver|d([])) | unique | join(', ') }};
when: not dns_early or dns_late
# This task should run instead of the above task when cluster/nodelocal DNS hasn't
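
These changes gate the implicit `default.svc.<dns_domain>` and `svc.<dns_domain>` search domains behind a new toggle. Note that, per the condition above, the defaults are still kept when `remove_default_searchdomains` is true but `searchdomains` is empty. A sketch of opting out while supplying your own domains (values are illustrative):

```yaml
remove_default_searchdomains: true
searchdomains:
  - corp.example.com
```
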
diff --git a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
index 2759b53e1..4397cdd63 100644
--- a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
+++ b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
@@ -13,9 +13,7 @@
{% for item in nameserverentries.split(',') %}
nameserver {{ item }}
{% endfor %}
- options ndots:{{ ndots }}
- options timeout:2
- options attempts:2
+ options ndots:{{ ndots }} timeout:{{ dns_timeout|default('2') }} attempts:{{ dns_attempts|default('2') }}
state: present
insertbefore: BOF
create: yes
@@ -31,7 +29,7 @@
backup: "{{ not resolvconf_stat.stat.islnk }}"
with_nested:
- "{{ [resolvconffile, base|default(''), head|default('')] | difference(['']) }}"
- - [ 'search ', 'nameserver ', 'domain ', 'options ' ]
+ - [ 'search\s', 'nameserver\s', 'domain\s', 'options\s' ]
notify: Preinstall | propagate resolvconf to k8s components
- name: Remove search/domain/nameserver options after block
@@ -42,7 +40,7 @@
backup: "{{ not resolvconf_stat.stat.islnk }}"
with_nested:
- "{{ [resolvconffile, base|default(''), head|default('')] | difference(['']) }}"
- - [ 'search ', 'nameserver ', 'domain ', 'options ' ]
+ - [ 'search\s', 'nameserver\s', 'domain\s', 'options\s' ]
notify: Preinstall | propagate resolvconf to k8s components
- name: get temporary resolveconf cloud init file content
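
The resolver options are now collapsed onto one line and parameterized. Both `dns_timeout` and `dns_attempts` fall back to 2 in the templates, so set them only to deviate from that; a sketch:

```yaml
dns_timeout: 3    # resolv.conf "options timeout:"
dns_attempts: 4   # resolv.conf "options attempts:"
```
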
diff --git a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml
index 851f236ac..f245814ad 100644
--- a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml
+++ b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml
@@ -9,12 +9,17 @@
backup: yes
notify: Preinstall | update resolvconf for networkmanager
+- name: set default dns if remove_default_searchdomains is false
+ set_fact:
+ default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"]
+ when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0)
+
- name: NetworkManager | Add DNS search to NM configuration
ini_file:
path: /etc/NetworkManager/conf.d/dns.conf
section: global-dns
option: searches
- value: "{{ ([ 'default.svc.' + dns_domain, 'svc.' + dns_domain ] + searchdomains|default([])) | join(',') }}"
+ value: "{{ (default_searchdomains|default([]) + searchdomains|default([])) | join(',') }}"
mode: '0600'
backup: yes
notify: Preinstall | update resolvconf for networkmanager
@@ -24,7 +29,7 @@
path: /etc/NetworkManager/conf.d/dns.conf
section: global-dns
option: options
- value: "ndots:{{ ndots }};timeout:2;attempts:2;"
+ value: "ndots:{{ ndots }};timeout:{{ dns_timeout|default('2') }};attempts:{{ dns_attempts|default('2') }};"
mode: '0600'
backup: yes
notify: Preinstall | update resolvconf for networkmanager
diff --git a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
index 4402a5ce2..b4fccfb89 100644
--- a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
+++ b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
@@ -9,6 +9,28 @@
- ansible_pkg_mgr == 'zypper'
tags: bootstrap-os
+- block:
+ - name: Add Debian Backports apt repo
+ apt_repository:
+ repo: "deb http://deb.debian.org/debian {{ ansible_distribution_release }}-backports main"
+ state: present
+ filename: debian-backports
+
+ - name: Set libseccomp2 pin priority to apt_preferences on Debian buster
+ copy:
+ content: |
+ Package: libseccomp2
+ Pin: release a={{ ansible_distribution_release }}-backports
+ Pin-Priority: 1001
+ dest: "/etc/apt/preferences.d/libseccomp2"
+ owner: "root"
+ mode: 0644
+ when:
+ - ansible_distribution == "Debian"
+ - ansible_distribution_version == "10"
+ tags:
+ - bootstrap-os
+
- name: Update package management cache (APT)
apt:
update_cache: yes
diff --git a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
index 720e7337b..dafa47f79 100644
--- a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
+++ b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
@@ -127,3 +127,12 @@
state: present
params: 'numdummies=0'
when: enable_nodelocaldns
+
+- name: Set additional sysctl variables
+ sysctl:
+ sysctl_file: "{{ sysctl_file_path }}"
+ name: "{{ item.name }}"
+ value: "{{ item.value }}"
+ state: present
+ reload: yes
+ with_items: "{{ additional_sysctl }}"
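
This task iterates over a new `additional_sysctl` list whose default and item shape are defined later in this diff (in kubespray-defaults). A sketch; the second entry is illustrative:

```yaml
additional_sysctl:
  - { name: kernel.pid_max, value: 131072 }
  - { name: fs.inotify.max_user_watches, value: 524288 }  # illustrative
```
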
diff --git a/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml b/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml
index ba4578744..d80d14e5e 100644
--- a/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml
+++ b/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml
@@ -63,3 +63,17 @@
name: "{{ ntp_service_name }}"
state: started
enabled: true
+
+- name: Ensure tzdata package
+ package:
+ name:
+ - tzdata
+ state: present
+ when:
+ - ntp_timezone
+
+- name: Set timezone
+ timezone:
+ name: "{{ ntp_timezone }}"
+ when:
+ - ntp_timezone
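
Paired with the new `ntp_timezone` default in preinstall (empty by default, leaving the host unchanged), enabling it is a one-line inventory override using any tz database name:

```yaml
ntp_timezone: "Etc/UTC"
```
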
diff --git a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
index 693e31f54..ae4ffadd6 100644
--- a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
+++ b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
@@ -35,6 +35,7 @@
backup: yes
unsafe_writes: yes
when:
+ - populate_loadbalancer_apiserver_to_hosts_file
- loadbalancer_apiserver is defined
- loadbalancer_apiserver.address is defined
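
The new guard lets operators keep the apiserver load-balancer entry out of /etc/hosts, e.g. when the FQDN is already resolvable via DNS:

```yaml
populate_loadbalancer_apiserver_to_hosts_file: false
```
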
diff --git a/roles/kubernetes/preinstall/templates/chrony.conf.j2 b/roles/kubernetes/preinstall/templates/chrony.conf.j2
index 226f9bc99..7931f435d 100644
--- a/roles/kubernetes/preinstall/templates/chrony.conf.j2
+++ b/roles/kubernetes/preinstall/templates/chrony.conf.j2
@@ -12,7 +12,7 @@ driftfile /var/lib/chrony/drift
{% if ntp_tinker_panic is sameas true %}
# Force time sync if the drift exceeds the threshold specified
-# Usefull for VMs that can be paused and much later resumed.
+# Useful for VMs that can be paused and much later resumed.
makestep 1.0 -1
{% else %}
# Allow the system clock to be stepped in the first three updates
diff --git a/roles/kubernetes/preinstall/templates/dhclient_dnsupdate.sh.j2 b/roles/kubernetes/preinstall/templates/dhclient_dnsupdate.sh.j2
index b40a80ae4..8cf8b8192 100644
--- a/roles/kubernetes/preinstall/templates/dhclient_dnsupdate.sh.j2
+++ b/roles/kubernetes/preinstall/templates/dhclient_dnsupdate.sh.j2
@@ -6,7 +6,7 @@
if [ $reason = "BOUND" ]; then
if [ -n "$new_domain_search" -o -n "$new_domain_name_servers" ]; then
RESOLV_CONF=$(cat /etc/resolv.conf | sed -r '/^options (timeout|attempts|ndots).*$/d')
- OPTIONS="options timeout:2\noptions attempts:2\noptions ndots:{{ ndots }}"
+ OPTIONS="options timeout:{{ dns_timeout|default('2') }} attempts:{{ dns_attempts|default('2') }} ndots:{{ ndots }}"
printf "%b\n" "$RESOLV_CONF\n$OPTIONS" > /etc/resolv.conf
fi
diff --git a/roles/kubernetes/preinstall/templates/dhclient_dnsupdate_rh.sh.j2 b/roles/kubernetes/preinstall/templates/dhclient_dnsupdate_rh.sh.j2
index 028f0fd89..511839fd5 100644
--- a/roles/kubernetes/preinstall/templates/dhclient_dnsupdate_rh.sh.j2
+++ b/roles/kubernetes/preinstall/templates/dhclient_dnsupdate_rh.sh.j2
@@ -6,7 +6,7 @@
zdnsupdate_config() {
if [ -n "$new_domain_search" -o -n "$new_domain_name_servers" ]; then
RESOLV_CONF=$(cat /etc/resolv.conf | sed -r '/^options (timeout|attempts|ndots).*$/d')
- OPTIONS="options timeout:2\noptions attempts:2\noptions ndots:{{ ndots }}"
+ OPTIONS="options timeout:{{ dns_timeout|default('2') }} attempts:{{ dns_attempts|default('2') }} ndots:{{ ndots }}"
echo -e "$RESOLV_CONF\n$OPTIONS" > /etc/resolv.conf
fi
diff --git a/roles/kubernetes/preinstall/templates/ntp.conf.j2 b/roles/kubernetes/preinstall/templates/ntp.conf.j2
index b49c2e4b2..abeb8996a 100644
--- a/roles/kubernetes/preinstall/templates/ntp.conf.j2
+++ b/roles/kubernetes/preinstall/templates/ntp.conf.j2
@@ -6,7 +6,7 @@ driftfile {{ ntp_driftfile }}
{% if ntp_tinker_panic is sameas true %}
# Always reset the clock, even if the new time is more than 1000s away
-# from the current system time. Usefull for VMs that can be paused
+# from the current system time. Useful for VMs that can be paused
# and much later resumed.
tinker panic 0
{% endif %}
diff --git a/roles/kubernetes/preinstall/templates/resolved.conf.j2 b/roles/kubernetes/preinstall/templates/resolved.conf.j2
index 72d4e3331..901fd2473 100644
--- a/roles/kubernetes/preinstall/templates/resolved.conf.j2
+++ b/roles/kubernetes/preinstall/templates/resolved.conf.j2
@@ -5,7 +5,11 @@
DNS={{ ([nodelocaldns_ip] if enable_nodelocaldns else coredns_server )| list | join(' ') }}
{% endif %}
FallbackDNS={{ ( upstream_dns_servers|d([]) + nameservers|d([]) + cloud_resolver|d([])) | unique | join(' ') }}
+{% if remove_default_searchdomains is sameas false or (remove_default_searchdomains is sameas true and searchdomains|default([])|length==0) %}
Domains={{ ([ 'default.svc.' + dns_domain, 'svc.' + dns_domain ] + searchdomains|default([])) | join(' ') }}
+{% else %}
+Domains={{ searchdomains|default([]) | join(' ') }}
+{% endif %}
#LLMNR=no
#MulticastDNS=no
DNSSEC=no
diff --git a/roles/kubernetes/preinstall/vars/fedora.yml b/roles/kubernetes/preinstall/vars/fedora.yml
index 40d269dc4..d69b111b6 100644
--- a/roles/kubernetes/preinstall/vars/fedora.yml
+++ b/roles/kubernetes/preinstall/vars/fedora.yml
@@ -1,5 +1,6 @@
---
required_pkgs:
+ - iptables
- libselinux-python3
- device-mapper-libs
- conntrack
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 364b82129..4cbe0b2c4 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -15,10 +15,10 @@ is_fedora_coreos: false
disable_swap: true
## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.24.4
+kube_version: v1.25.3
## The minimum version working
-kube_version_min_required: v1.22.0
+kube_version_min_required: v1.23.0
## Kube Proxy mode One of ['iptables','ipvs']
kube_proxy_mode: ipvs
@@ -290,7 +290,7 @@ cri_socket: >-
{%- if container_manager == 'crio' -%}
unix:///var/run/crio/crio.sock
{%- elif container_manager == 'containerd' -%}
- unix:////var/run/containerd/containerd.sock
+ unix:///var/run/containerd/containerd.sock
{%- elif container_manager == 'docker' -%}
unix:///var/run/cri-dockerd.sock
{%- endif -%}
@@ -409,6 +409,7 @@ ingress_alb_enabled: false
cert_manager_enabled: false
expand_persistent_volumes: false
metallb_enabled: false
+metallb_speaker_enabled: "{{ metallb_enabled }}"
argocd_enabled: false
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
@@ -440,6 +441,8 @@ openstack_cacert: "{{ lookup('env','OS_CACERT') }}"
# Default values for the external OpenStack Cloud Controller
external_openstack_enable_ingress_hostname: false
+external_openstack_ingress_hostname_suffix: "nip.io"
+external_openstack_max_shared_lb: 2
external_openstack_lbaas_create_monitor: false
external_openstack_lbaas_monitor_delay: "1m"
external_openstack_lbaas_monitor_timeout: "30s"
@@ -474,7 +477,7 @@ rbac_enabled: "{{ 'RBAC' in authorization_modes }}"
kubelet_authentication_token_webhook: true
# When enabled, access to the kubelet API requires authorization by delegation to the API server
-kubelet_authorization_mode_webhook: true
+kubelet_authorization_mode_webhook: false
# kubelet uses certificates for authenticating to the Kubernetes API
# Automatically generate a new key and request a new certificate from the Kubernetes API as the current certificate approaches expiration
@@ -485,6 +488,12 @@ kubelet_rotate_server_certificates: false
# If set to true, kubelet errors if any of kernel tunables is different than kubelet defaults
kubelet_protect_kernel_defaults: true
+# Set additional sysctl parameters to modify Linux kernel variables, for example:
+# additional_sysctl:
+# - { name: kernel.pid_max, value: 131072 }
+#
+additional_sysctl: []
+
## List of key=value pairs that describe feature gates for
## the k8s cluster.
kube_feature_gates: []
diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml
index 2f1c42e0b..0c2be2dd8 100644
--- a/roles/network_plugin/calico/defaults/main.yml
+++ b/roles/network_plugin/calico/defaults/main.yml
@@ -42,7 +42,7 @@ global_as_num: "64512"
# Advertise Service External IPs
calico_advertise_service_external_ips: []
-# Adveritse Service LoadBalancer IPs
+# Advertise Service LoadBalancer IPs
calico_advertise_service_loadbalancer_ips: []
# Calico eBPF support
@@ -108,6 +108,10 @@ calico_wireguard_repo: https://download.copr.fedorainfracloud.org/results/jdoss/
# calico_ip_auto_method: "interface=eth.*"
# calico_ip6_auto_method: "interface=eth.*"
+# Set FELIX_MTUIFACEPATTERN, the pattern used to discover the host's interface for MTU auto-detection.
+# see https://projectcalico.docs.tigera.io/reference/felix/configuration
+# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
+
calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostname) }}"
kube_etcd_cacert_file: ca.pem
@@ -155,3 +159,7 @@ calico_ipam_maxblocksperhost: 0
# Calico apiserver (only with kdd)
calico_apiserver_enabled: false
+
+# Calico feature detection override; set "ChecksumOffloadBroken=true" to
+# work around https://github.com/projectcalico/calico/issues/3145
+calico_feature_detect_override: ""
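
Both new Calico knobs are plain inventory overrides. A sketch using the values suggested by the comments above:

```yaml
# Work around broken checksum offload (see the linked Calico issue)
calico_feature_detect_override: "ChecksumOffloadBroken=true"
# Restrict Felix MTU auto-detection to specific host interfaces
calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
```
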
diff --git a/roles/network_plugin/calico/rr/tasks/update-node.yml b/roles/network_plugin/calico/rr/tasks/update-node.yml
index 970cad83a..7070076b1 100644
--- a/roles/network_plugin/calico/rr/tasks/update-node.yml
+++ b/roles/network_plugin/calico/rr/tasks/update-node.yml
@@ -7,13 +7,13 @@
retry_count: "{{ 0 if retry_count is undefined else retry_count|int + 1 }}"
- name: Calico | Set label for route reflector # noqa 301 305
- shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-rr-id={{ calcio_rr_id }} --overwrite"
+ shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-rr-id={{ calico_rr_id }} --overwrite"
changed_when: false
register: calico_rr_id_label
until: calico_rr_id_label is succeeded
delay: "{{ retry_stagger | random + 3 }}"
retries: 10
- when: calcio_rr_id is defined
+ when: calico_rr_id is defined
- name: Calico-rr | Fetch current node object
command: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }} -ojson"
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml
index 11d5d2f09..d55c910ac 100644
--- a/roles/network_plugin/calico/tasks/install.yml
+++ b/roles/network_plugin/calico/tasks/install.yml
@@ -187,7 +187,8 @@
"bpfExternalServiceMode": "{{ calico_bpf_service_mode }}",
"wireguardEnabled": {{ calico_wireguard_enabled | bool }},
"logSeverityScreen": "{{ calico_felix_log_severity_screen }}",
- "vxlanEnabled": {{ calico_vxlan_mode != 'Never' }}
+ "vxlanEnabled": {{ calico_vxlan_mode != 'Never' }},
+ "featureDetectOverride": "{{ calico_feature_detect_override }}"
}
}
@@ -226,7 +227,7 @@
"cidr": "{{ calico_pool_cidr | default(kube_pods_subnet) }}",
"ipipMode": "{{ calico_ipip_mode }}",
"vxlanMode": "{{ calico_vxlan_mode }}",
- "natOutgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }}
+ "natOutgoing": {{ nat_outgoing|default(false) }}
}
}
@@ -265,7 +266,7 @@
"cidr": "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}",
"ipipMode": "{{ calico_ipip_mode_ipv6 }}",
"vxlanMode": "{{ calico_vxlan_mode_ipv6 }}",
- "natOutgoing": {{ nat_outgoing_ipv6|default(false) and not peer_with_router_ipv6|default(false) }}
+ "natOutgoing": {{ nat_outgoing_ipv6|default(false) }}
}
}
@@ -458,6 +459,9 @@
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/calico-ipamconfig.yml"
state: "latest"
+ register: resource_result
+ until: resource_result is succeeded
+ retries: 4
when:
- inventory_hostname == groups['kube_control_plane'][0]
- calico_datastore == "kdd"
diff --git a/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml b/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml
index c95924ac6..efa98c5d9 100644
--- a/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml
+++ b/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml
@@ -19,10 +19,10 @@
{"apiVersion": "projectcalico.org/v3",
"kind": "BGPPeer",
"metadata": {
- "name": "{{ calcio_rr_id }}-to-node"
+ "name": "{{ calico_rr_id }}-to-node"
},
"spec": {
- "peerSelector": "calico-rr-id == '{{ calcio_rr_id }}'",
+ "peerSelector": "calico-rr-id == '{{ calico_rr_id }}'",
"nodeSelector": "calico-group-id == '{{ calico_group_id }}'"
}}
register: output
@@ -30,7 +30,7 @@
until: output.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
when:
- - calcio_rr_id is defined
+ - calico_rr_id is defined
- calico_group_id is defined
- inventory_hostname in groups['calico_rr']
@@ -58,7 +58,7 @@
- "{{ groups['calico_rr'] | default([]) }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- - calcio_rr_id is not defined or calico_group_id is not defined
+ - calico_rr_id is not defined or calico_group_id is not defined
- name: Calico | Configure route reflectors to peer with each other
command:
diff --git a/roles/network_plugin/calico/templates/calico-apiserver.yml.j2 b/roles/network_plugin/calico/templates/calico-apiserver.yml.j2
index 2ee15b4c8..dabc7a3f5 100644
--- a/roles/network_plugin/calico/templates/calico-apiserver.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-apiserver.yml.j2
@@ -285,35 +285,3 @@ subjects:
- kind: ServiceAccount
name: calico-apiserver
namespace: calico-apiserver
-
----
-
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
- annotations:
- seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
- name: calico-apiserver
-spec:
- allowPrivilegeEscalation: false
- fsGroup:
- ranges:
- - max: 65535
- min: 1
- rule: MustRunAs
- hostPorts:
- - max: 65535
- min: 0
- requiredDropCapabilities:
- - ALL
- runAsUser:
- rule: RunAsAny
- seLinux:
- rule: RunAsAny
- supplementalGroups:
- ranges:
- - max: 65535
- min: 1
- rule: MustRunAs
- volumes:
- - secret
diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2
index cc461516b..3af01c8e7 100644
--- a/roles/network_plugin/calico/templates/calico-node.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node.yml.j2
@@ -275,6 +275,10 @@ spec:
- name: IP6_AUTODETECTION_METHOD
value: "{{ calico_ip6_auto_method }}"
{% endif %}
+{% if calico_felix_mtu_iface_pattern is defined %}
+ - name: FELIX_MTUIFACEPATTERN
+ value: "{{ calico_felix_mtu_iface_pattern }}"
+{% endif %}
{% if enable_dual_stack_networks %}
- name: IP6
value: autodetect
diff --git a/roles/network_plugin/calico/templates/calico-typha.yml.j2 b/roles/network_plugin/calico/templates/calico-typha.yml.j2
index 65e6606b2..22d2f2cac 100644
--- a/roles/network_plugin/calico/templates/calico-typha.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-typha.yml.j2
@@ -176,7 +176,7 @@ spec:
# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict
-apiVersion: policy/v1beta1
+apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: calico-typha
diff --git a/roles/network_plugin/calico/vars/rocky-9.yml b/roles/network_plugin/calico/vars/rocky-9.yml
new file mode 100644
index 000000000..43df5457a
--- /dev/null
+++ b/roles/network_plugin/calico/vars/rocky-9.yml
@@ -0,0 +1,3 @@
+---
+calico_wireguard_packages:
+ - wireguard-tools
diff --git a/roles/network_plugin/cilium/defaults/main.yml b/roles/network_plugin/cilium/defaults/main.yml
index c590637dc..0e624e53c 100644
--- a/roles/network_plugin/cilium/defaults/main.yml
+++ b/roles/network_plugin/cilium/defaults/main.yml
@@ -1,4 +1,5 @@
---
+cilium_min_version_required: "1.10"
# Log-level
cilium_debug: false
@@ -7,7 +8,7 @@ cilium_enable_ipv4: true
cilium_enable_ipv6: false
# Cilium agent health port
-cilium_agent_health_port: "{%- if cilium_version | regex_replace('v') is version('1.11.6', '>=') -%}9879{%- else -%}9876{%- endif -%}"
+cilium_agent_health_port: "{%- if cilium_version | regex_replace('v') is version('1.11.6', '>=') -%}9879 {%- else -%} 9876 {%- endif -%}"
# Identity allocation mode selects how identities are shared between cilium
# nodes by setting how they are stored. The options are "crd" or "kvstore".
@@ -106,6 +107,7 @@ cilium_wireguard_userspace_fallback: false
# https://docs.cilium.io/en/stable/concepts/networking/masquerading/
# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded
cilium_ip_masq_agent_enable: false
+
### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded
cilium_non_masquerade_cidrs:
- 10.0.0.0/8
@@ -201,7 +203,7 @@ cilium_cgroup_host_root: "/run/cilium/cgroupv2"
# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
-cilium_bpf_map_dynamic_size_ratio: "{%- if cilium_version | regex_replace('v') is version('1.8', '>=') -%}0.0025{%- else -%}0.0{%- endif -%}"
+cilium_bpf_map_dynamic_size_ratio: "0.0025"
# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
# Available for Cilium v1.10 and up
@@ -240,3 +242,8 @@ cilium_disable_cnp_status_updates: true
# Configure how long to wait for the Cilium DaemonSet to be ready again
cilium_rolling_restart_wait_retries_count: 30
cilium_rolling_restart_wait_retries_delay_seconds: 10
+
+# Cilium changed the default metrics exporter ports in 1.12
+cilium_agent_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9962', '9090') }}"
+cilium_operator_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9963', '6942') }}"
+cilium_hubble_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9965', '9091') }}"
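
The three scrape ports now track the upstream defaults, switching automatically at Cilium 1.12. Overriding them pins a fixed set of ports regardless of version; a sketch forcing the pre-1.12 values:

```yaml
# Illustrative: keep the legacy metrics ports on any Cilium version
cilium_agent_scrape_port: "9090"
cilium_operator_scrape_port: "6942"
cilium_hubble_scrape_port: "9091"
```
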
diff --git a/roles/network_plugin/cilium/tasks/check.yml b/roles/network_plugin/cilium/tasks/check.yml
index fffa1b53a..c65591f66 100644
--- a/roles/network_plugin/cilium/tasks/check.yml
+++ b/roles/network_plugin/cilium/tasks/check.yml
@@ -48,13 +48,10 @@
msg: "cilium_encryption_type must be either 'ipsec' or 'wireguard'"
when: cilium_encryption_enabled
-- name: Stop if `cilium_encryption_type` is set to "wireguard" and cilium_version is < v1.10.0
+- name: Stop if cilium_version is < v1.10.0
assert:
- that: cilium_version | regex_replace('v') is version('1.10', '>')
- msg: "cilium_encryption_type is set to 'wireguard' but cilium_version is < v1.10.0"
- when:
- - cilium_encryption_enabled
- - cilium_encryption_type == "wireguard"
+ that: cilium_version | regex_replace('v') is version(cilium_min_version_required, '>=')
+ msg: "cilium_version is too low. Minimum version {{ cilium_min_version_required }}"
# TODO: Clean this task up when we drop backward compatibility support for `cilium_ipsec_enabled`
- name: Set `cilium_encryption_type` to "ipsec" and if `cilium_ipsec_enabled` is true
diff --git a/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2 b/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2
index 9f7a71174..8a40a6641 100644
--- a/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2
@@ -39,7 +39,14 @@ rules:
- get
- list
- watch
-{% if cilium_version | regex_replace('v') is version('1.10', '>=') %}
+- apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - get
+ - list
+ - watch
- apiGroups:
- ""
resources:
@@ -47,22 +54,14 @@ rules:
- services/status
verbs:
- update
-{% endif %}
- apiGroups:
- ""
resources:
- # to automatically read from k8s and import the node's pod CIDR to cilium's
- # etcd so all nodes know how to reach another pod running in in a different
- # node.
- - nodes
# to perform the translation of a CNP that contains `ToGroup` to its endpoints
- services
- endpoints
# to check apiserver connectivity
- namespaces
-{% if cilium_version | regex_replace('v') is version('1.7', '<') %}
- - componentstatuses
-{% endif %}
verbs:
- get
- list
@@ -72,26 +71,22 @@ rules:
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
+ - ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
+ - ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints
- ciliumendpoints/status
-{% if cilium_version | regex_replace('v') is version('1.6', '>=') %}
+ - ciliumendpoints/finalizers
- ciliumnodes
- ciliumnodes/status
+ - ciliumnodes/finalizers
- ciliumidentities
- ciliumidentities/status
-{% endif %}
-{% if cilium_version | regex_replace('v') is version('1.9', '>=') %}
- - ciliumnetworkpolicies/finalizers
- - ciliumclusterwidenetworkpolicies/finalizers
- - ciliumendpoints/finalizers
- - ciliumnodes/finalizers
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumlocalredirectpolicies/finalizers
-{% endif %}
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
- ciliumendpointslices
{% endif %}
@@ -101,12 +96,7 @@ rules:
- ciliumenvoyconfigs
{% endif %}
verbs:
- - get
- - list
- - watch
- - create
- - update
- - delete
+ - '*'
- apiGroups:
- apiextensions.k8s.io
resources:
@@ -117,16 +107,12 @@ rules:
- list
- update
- watch
-{% if cilium_version | regex_replace('v') is version('1.8', '>=') %}
- # For cilium-operator running in HA mode.
- #
- # Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
- # between mulitple running instances.
- # The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less
- # common and fewer objects in the cluster watch "all Leases".
- # The support for leases was introduced in coordination.k8s.io/v1 during Kubernetes 1.14 release.
- # In Cilium we currently don't support HA mode for K8s version < 1.14. This condition make sure
- # that we only authorize access to leases resources in supported K8s versions.
+# For cilium-operator running in HA mode.
+#
+# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
+# between multiple running instances.
+# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less
+# common and fewer objects in the cluster watch "all Leases".
- apiGroups:
- coordination.k8s.io
resources:
@@ -135,4 +121,26 @@ rules:
- create
- get
- update
+{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - update
+ resourceNames:
+ - ciliumbgploadbalancerippools.cilium.io
+ - ciliumbgppeeringpolicies.cilium.io
+ - ciliumclusterwideenvoyconfigs.cilium.io
+ - ciliumclusterwidenetworkpolicies.cilium.io
+ - ciliumegressgatewaypolicies.cilium.io
+ - ciliumegressnatpolicies.cilium.io
+ - ciliumendpoints.cilium.io
+ - ciliumendpointslices.cilium.io
+ - ciliumenvoyconfigs.cilium.io
+ - ciliumexternalworkloads.cilium.io
+ - ciliumidentities.cilium.io
+ - ciliumlocalredirectpolicies.cilium.io
+ - ciliumnetworkpolicies.cilium.io
+ - ciliumnodes.cilium.io
{% endif %}
diff --git a/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 b/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2
index ab8a31926..5a5bd4a92 100644
--- a/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2
@@ -2,11 +2,11 @@
apiVersion: apps/v1
kind: Deployment
metadata:
+ name: cilium-operator
+ namespace: kube-system
labels:
io.cilium/app: operator
name: cilium-operator
- name: cilium-operator
- namespace: kube-system
spec:
replicas: {{ cilium_operator_replicas }}
selector:
@@ -22,29 +22,22 @@ spec:
metadata:
{% if cilium_enable_prometheus %}
annotations:
- prometheus.io/port: "6942"
+ prometheus.io/port: "{{ cilium_operator_scrape_port }}"
prometheus.io/scrape: "true"
{% endif %}
labels:
io.cilium/app: operator
name: cilium-operator
spec:
- # In HA mode, cilium-operator pods must not be scheduled on the same
- # node as they will clash with each other.
- affinity:
- podAntiAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: io.cilium/app
- operator: In
- values:
- - operator
- topologyKey: "kubernetes.io/hostname"
containers:
- - args:
- - --debug=$(CILIUM_DEBUG)
+ - name: cilium-operator
+ image: "{{ cilium_operator_image_repo }}:{{ cilium_operator_image_tag }}"
+ imagePullPolicy: {{ k8s_image_pull_policy }}
+ command:
+ - cilium-operator
+ args:
- --config-dir=/tmp/cilium/config-map
+ - --debug=$(CILIUM_DEBUG)
{% if cilium_operator_custom_args is string %}
- {{ cilium_operator_custom_args }}
{% else %}
@@ -52,14 +45,7 @@ spec:
- {{ flag }}
{% endfor %}
{% endif %}
- command:
- - cilium-operator
env:
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.namespace
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
@@ -76,45 +62,23 @@ spec:
key: debug
name: cilium-config
optional: true
-# We are already mounting the whole ConfigMap as a directory.
-# https://github.com/cilium/cilium/pull/10347
-{% if cilium_version | regex_replace('v') is version('1.8', '<') %}
- - name: CILIUM_CLUSTER_NAME
- valueFrom:
- configMapKeyRef:
- key: cluster-name
- name: cilium-config
- optional: true
- - name: CILIUM_CLUSTER_ID
- valueFrom:
- configMapKeyRef:
- key: cluster-id
- name: cilium-config
- optional: true
- - name: CILIUM_DISABLE_ENDPOINT_CRD
- valueFrom:
- configMapKeyRef:
- key: disable-endpoint-crd
- name: cilium-config
- optional: true
-{% endif %}
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
- key: AWS_ACCESS_KEY_ID
name: cilium-aws
+ key: AWS_ACCESS_KEY_ID
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
- key: AWS_SECRET_ACCESS_KEY
name: cilium-aws
+ key: AWS_SECRET_ACCESS_KEY
optional: true
- name: AWS_DEFAULT_REGION
valueFrom:
secretKeyRef:
- key: AWS_DEFAULT_REGION
name: cilium-aws
+ key: AWS_DEFAULT_REGION
optional: true
{% if cilium_kube_proxy_replacement == 'strict' %}
- name: KUBERNETES_SERVICE_HOST
@@ -122,14 +86,16 @@ spec:
- name: KUBERNETES_SERVICE_PORT
value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
{% endif %}
- image: "{{ cilium_operator_image_repo }}:{{ cilium_operator_image_tag }}"
- imagePullPolicy: {{ k8s_image_pull_policy }}
- name: cilium-operator
{% if cilium_enable_prometheus %}
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
ports:
- - containerPort: 6942
- hostPort: 6942
- name: prometheus
+ - name: prometheus
+ containerPort: {{ cilium_operator_scrape_port }}
+ hostPort: {{ cilium_operator_scrape_port }}
protocol: TCP
{% endif %}
livenessProbe:
@@ -146,46 +112,55 @@ spec:
periodSeconds: 10
timeoutSeconds: 3
volumeMounts:
-{% if cilium_identity_allocation_mode == "kvstore" %}
- - mountPath: /var/lib/etcd-config
- name: etcd-config-path
+ - name: cilium-config-path
+ mountPath: /tmp/cilium/config-map
readOnly: true
- - mountPath: "{{cilium_cert_dir}}"
- name: etcd-secrets
+{% if cilium_identity_allocation_mode == "kvstore" %}
+ - name: etcd-config-path
+ mountPath: /var/lib/etcd-config
+ readOnly: true
+ - name: etcd-secrets
+ mountPath: "{{cilium_cert_dir}}"
readOnly: true
{% endif %}
- - mountPath: /tmp/cilium/config-map
- name: cilium-config-path
- readOnly: true
{% for volume_mount in cilium_operator_extra_volume_mounts %}
- {{ volume_mount | to_nice_yaml(indent=2) | indent(14) }}
{% endfor %}
- dnsPolicy: ClusterFirst
- priorityClassName: system-node-critical
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
restartPolicy: Always
+ priorityClassName: system-node-critical
serviceAccount: cilium-operator
serviceAccountName: cilium-operator
- hostNetwork: true
+ # In HA mode, cilium-operator pods must not be scheduled on the same
+ # node as they will clash with each other.
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ io.cilium/app: operator
tolerations:
- operator: Exists
volumes:
+ - name: cilium-config-path
+ configMap:
+ name: cilium-config
{% if cilium_identity_allocation_mode == "kvstore" %}
# To read the etcd config stored in config maps
- - configMap:
+ - name: etcd-config-path
+ configMap:
+ name: cilium-config
defaultMode: 420
items:
- key: etcd-config
path: etcd.config
- name: cilium-config
- name: etcd-config-path
# To read the k8s etcd secrets in case the user might want to use TLS
- name: etcd-secrets
hostPath:
path: "{{cilium_cert_dir}}"
{% endif %}
- - configMap:
- name: cilium-config
- name: cilium-config-path
{% for volume in cilium_operator_extra_volumes %}
- {{ volume | to_nice_yaml(indent=2) | indent(10) }}
{% endfor %}
diff --git a/roles/network_plugin/cilium/templates/cilium/config.yml.j2 b/roles/network_plugin/cilium/templates/cilium/config.yml.j2
index 75232991f..6e647760d 100644
--- a/roles/network_plugin/cilium/templates/cilium/config.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium/config.yml.j2
@@ -41,8 +41,8 @@ data:
# NOTE that this will open the port on ALL nodes where Cilium pods are
# scheduled.
{% if cilium_enable_prometheus %}
- prometheus-serve-addr: ":9090"
- operator-prometheus-serve-addr: ":6942"
+ prometheus-serve-addr: ":{{ cilium_agent_scrape_port }}"
+ operator-prometheus-serve-addr: ":{{ cilium_operator_scrape_port }}"
enable-metrics: "true"
{% endif %}
@@ -127,31 +127,6 @@ data:
cluster-id: "{{ cilium_cluster_id }}"
{% endif %}
-# `tofqdns-enable-poller` is deprecated in 1.8, removed in 1.9
-# https://github.com/cilium/cilium/issues/8604
-{% if cilium_version | regex_replace('v') is version('1.9', '<') %}
- # DNS Polling periodically issues a DNS lookup for each `matchName` from
- # cilium-agent. The result is used to regenerate endpoint policy.
- # DNS lookups are repeated with an interval of 5 seconds, and are made for
- # A(IPv4) and AAAA(IPv6) addresses. Should a lookup fail, the most recent IP
- # data is used instead. An IP change will trigger a regeneration of the Cilium
- # policy for each endpoint and increment the per cilium-agent policy
- # repository revision.
- #
- # This option is disabled by default starting from version 1.4.x in favor
- # of a more powerful DNS proxy-based implementation, see [0] for details.
- # Enable this option if you want to use FQDN policies but do not want to use
- # the DNS proxy.
- #
- # To ease upgrade, users may opt to set this option to "true".
- # Otherwise please refer to the Upgrade Guide [1] which explains how to
- # prepare policy rules for upgrade.
- #
- # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based
- # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
- tofqdns-enable-poller: "{{cilium_tofqdns_enable_poller}}"
-{% endif %}
-
# `wait-bpf-mount` is removed after v1.10.4
# https://github.com/cilium/cilium/commit/d2217045cb3726a7f823174e086913b69b8090da
{% if cilium_version | regex_replace('v') is version('1.10.4', '<') %}
@@ -159,14 +134,6 @@ data:
wait-bpf-mount: "false"
{% endif %}
-# `enable-legacy-services` is deprecated in 1.6, removed in 1.9
-# https://github.com/cilium/cilium/pull/10255
-{% if cilium_version | regex_replace('v') is version('1.9', '<') %}
- # Enable legacy services (prior v1.5) to prevent from terminating existing
- # connections with services when upgrading Cilium from < v1.5 to v1.5.
- enable-legacy-services: "{{cilium_enable_legacy_services}}"
-{% endif %}
-
kube-proxy-replacement: "{{ cilium_kube_proxy_replacement }}"
# `native-routing-cidr` is deprecated in 1.10, removed in 1.12.
@@ -175,12 +142,12 @@ data:
{% if cilium_version | regex_replace('v') is version('1.12', '<') %}
native-routing-cidr: "{{ cilium_native_routing_cidr }}"
{% else %}
- {% if cilium_native_routing_cidr | length %}
+{% if cilium_native_routing_cidr | length %}
ipv4-native-routing-cidr: "{{ cilium_native_routing_cidr }}"
- {% endif %}
- {% if cilium_native_routing_cidr_ipv6 | length %}
+{% endif %}
+{% if cilium_native_routing_cidr_ipv6 | length %}
ipv6-native-routing-cidr: "{{ cilium_native_routing_cidr_ipv6 }}"
- {% endif %}
+{% endif %}
{% endif %}
auto-direct-node-routes: "{{ cilium_auto_direct_node_routes }}"
@@ -191,7 +158,7 @@ data:
{% if cilium_enable_hubble %}
enable-hubble: "true"
{% if cilium_enable_hubble_metrics %}
- hubble-metrics-server: ":9091"
+ hubble-metrics-server: ":{{ cilium_hubble_scrape_port }}"
hubble-metrics:
{% for hubble_metrics_cycle in cilium_hubble_metrics %}
{{ hubble_metrics_cycle }}
@@ -215,22 +182,20 @@ data:
# Enable transparent network encryption
{% if cilium_encryption_enabled %}
- {% if cilium_encryption_type == "ipsec" %}
+{% if cilium_encryption_type == "ipsec" %}
enable-ipsec: "true"
ipsec-key-file: /etc/ipsec/keys
encrypt-node: "{{ cilium_ipsec_node_encryption }}"
- {% endif %}
+{% endif %}
- {% if cilium_encryption_type == "wireguard" %}
+{% if cilium_encryption_type == "wireguard" %}
enable-wireguard: "true"
enable-wireguard-userspace-fallback: "{{ cilium_wireguard_userspace_fallback }}"
- {% endif %}
+{% endif %}
{% endif %}
# IPAM settings
-{% if cilium_version | regex_replace('v') is version('1.9', '>=') %}
ipam: "{{ cilium_ipam_mode }}"
-{% endif %}
agent-health-port: "{{ cilium_agent_health_port }}"
@@ -240,10 +205,8 @@ data:
bpf-map-dynamic-size-ratio: "{{ cilium_bpf_map_dynamic_size_ratio }}"
-{% if cilium_version | regex_replace('v') is version('1.10', '>=') %}
enable-ipv4-masquerade: "{{ cilium_enable_ipv4_masquerade }}"
enable-ipv6-masquerade: "{{ cilium_enable_ipv6_masquerade }}"
-{% endif %}
enable-bpf-masquerade: "{{ cilium_enable_bpf_masquerade }}"
@@ -258,7 +221,6 @@ data:
enable-bpf-clock-probe: "{{ cilium_enable_bpf_clock_probe }}"
disable-cnp-status-updates: "{{ cilium_disable_cnp_status_updates }}"
-
{% if cilium_ip_masq_agent_enable %}
---
apiVersion: v1
diff --git a/roles/network_plugin/cilium/templates/cilium/cr.yml.j2 b/roles/network_plugin/cilium/templates/cilium/cr.yml.j2
index 02d8e6bf7..a16211c17 100644
--- a/roles/network_plugin/cilium/templates/cilium/cr.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium/cr.yml.j2
@@ -7,9 +7,6 @@ rules:
- apiGroups:
- networking.k8s.io
resources:
-{% if cilium_version | regex_replace('v') is version('1.7', '<') %}
- - ingresses
-{% endif %}
- networkpolicies
verbs:
- get
@@ -28,34 +25,25 @@ rules:
resources:
- namespaces
- services
- - nodes
+ - pods
- endpoints
-{% if cilium_version | regex_replace('v') is version('1.7', '<') %}
- - componentstatuses
-{% endif %}
+ - nodes
verbs:
- get
- list
- watch
-{% if cilium_version | regex_replace('v') is version('1.7', '<') %}
-- apiGroups:
- - extensions
- resources:
- - ingresses
- verbs:
- - create
- - get
- - list
- - watch
-{% endif %}
-{% if cilium_version | regex_replace('v') is version('1.7', '>') %}
+{% if cilium_version | regex_replace('v') is version('1.12', '<') %}
- apiGroups:
- ""
resources:
+ - pods
- pods/finalizers
verbs:
+ - get
+ - list
+ - watch
- update
-{% endif %}
+ - delete
- apiGroups:
- ""
resources:
@@ -66,6 +54,7 @@ rules:
- list
- watch
- update
+{% endif %}
- apiGroups:
- ""
resources:
@@ -78,47 +67,45 @@ rules:
resources:
- customresourcedefinitions
verbs:
+ # Deprecated for removal in v1.10
- create
- - get
- list
- watch
- update
+
+ # This is used when validating policies in preflight. This will need to stay
+ # until we figure out how to avoid "get" inside the preflight, and then
+ # should be removed ideally.
+ - get
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
-{% if cilium_version | regex_replace('v') is version('1.7', '>=') %}
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
-{% endif %}
- ciliumendpoints
- ciliumendpoints/status
-{% if cilium_version | regex_replace('v') is version('1.6', '>=') %}
- ciliumnodes
- ciliumnodes/status
- ciliumidentities
- - ciliumidentities/status
-{% endif %}
-{% if cilium_version | regex_replace('v') is version('1.9', '>=') %}
- - ciliumnetworkpolicies/finalizers
- - ciliumclusterwidenetworkpolicies/finalizers
- - ciliumendpoints/finalizers
- - ciliumnodes/finalizers
- - ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- - ciliumlocalredirectpolicies/finalizers
-{% endif %}
-{% if cilium_version | regex_replace('v') is version('1.10', '>=') %}
- ciliumegressnatpolicies
-{% endif %}
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
- ciliumendpointslices
{% endif %}
{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
- ciliumbgploadbalancerippools
- ciliumbgppeeringpolicies
+{% endif %}
+{% if cilium_version | regex_replace('v') is version('1.11.5', '<') %}
+ - ciliumnetworkpolicies/finalizers
+ - ciliumclusterwidenetworkpolicies/finalizers
+ - ciliumendpoints/finalizers
+ - ciliumnodes/finalizers
+ - ciliumidentities/finalizers
+ - ciliumlocalredirectpolicies/finalizers
{% endif %}
verbs:
- '*'
@@ -128,6 +115,7 @@ rules:
resources:
- ciliumclusterwideenvoyconfigs
- ciliumenvoyconfigs
+ - ciliumegressgatewaypolicies
verbs:
- list
- watch
diff --git a/roles/network_plugin/cilium/templates/cilium/crb.yml.j2 b/roles/network_plugin/cilium/templates/cilium/crb.yml.j2
index f7516d707..d23897fa0 100644
--- a/roles/network_plugin/cilium/templates/cilium/crb.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium/crb.yml.j2
@@ -11,8 +11,3 @@ subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system
-{% if cilium_version | regex_replace('v') is version('1.9', '<') %}
-- apiGroup: rbac.authorization.k8s.io
- kind: Group
- name: system:nodes
-{% endif %}
diff --git a/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 b/roles/network_plugin/cilium/templates/cilium/ds.yml.j2
index d5ceaa676..08385b457 100644
--- a/roles/network_plugin/cilium/templates/cilium/ds.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium/ds.yml.j2
@@ -2,37 +2,37 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
- labels:
- k8s-app: cilium
name: cilium
namespace: kube-system
+ labels:
+ k8s-app: cilium
spec:
selector:
matchLabels:
k8s-app: cilium
+ updateStrategy:
+ rollingUpdate:
+ # Specifies the maximum number of Pods that can be unavailable during the update process.
+ maxUnavailable: 2
+ type: RollingUpdate
template:
metadata:
annotations:
{% if cilium_enable_prometheus %}
- prometheus.io/port: "9090"
+ prometheus.io/port: "{{ cilium_agent_scrape_port }}"
prometheus.io/scrape: "true"
{% endif %}
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]'
labels:
k8s-app: cilium
spec:
- affinity:
- podAntiAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: k8s-app
- operator: In
- values:
- - cilium
- topologyKey: kubernetes.io/hostname
containers:
- - args:
+ - name: cilium-agent
+ image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
+ imagePullPolicy: {{ k8s_image_pull_policy }}
+ command:
+ - cilium-agent
+ args:
- --config-dir=/tmp/cilium/config-map
{% if cilium_mtu != "" %}
- --mtu={{ cilium_mtu }}
@@ -44,54 +44,6 @@ spec:
- {{ flag }}
{% endfor %}
{% endif %}
- command:
- - cilium-agent
- env:
- - name: K8S_NODE_NAME
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: spec.nodeName
- - name: CILIUM_K8S_NAMESPACE
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.namespace
- - name: CILIUM_CLUSTERMESH_CONFIG
- value: /var/lib/cilium/clustermesh/
-{% if cilium_kube_proxy_replacement == 'strict' %}
- - name: KUBERNETES_SERVICE_HOST
- value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}"
- - name: KUBERNETES_SERVICE_PORT
- value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
-{% endif %}
-{% for env_var in cilium_agent_extra_env_vars %}
- - {{ env_var | to_nice_yaml(indent=2) | indent(10) }}
-{% endfor %}
- image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
- imagePullPolicy: {{ k8s_image_pull_policy }}
- resources:
- limits:
- cpu: {{ cilium_cpu_limit }}
- memory: {{ cilium_memory_limit }}
- requests:
- cpu: {{ cilium_cpu_requests }}
- memory: {{ cilium_memory_requests }}
- lifecycle:
- postStart:
- exec:
- command:
- - "/cni-install.sh"
-{% if cilium_version | regex_replace('v') is version('1.10', '>=') %}
- - "--cni-exclusive={{ cilium_cni_exclusive }}"
-{% endif %}
-{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
- - "--log-file={{ cilium_cni_log_file }}"
-{% endif %}
- preStop:
- exec:
- command:
- - /cni-uninstall.sh
startupProbe:
httpGet:
host: '127.0.0.1'
@@ -131,60 +83,89 @@ spec:
successThreshold: 1
failureThreshold: 3
timeoutSeconds: 5
- name: cilium-agent
+ env:
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
+ - name: CILIUM_K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: CILIUM_CLUSTERMESH_CONFIG
+ value: /var/lib/cilium/clustermesh/
+{% if cilium_kube_proxy_replacement == 'strict' %}
+ - name: KUBERNETES_SERVICE_HOST
+ value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}"
+ - name: KUBERNETES_SERVICE_PORT
+ value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
+{% endif %}
+{% for env_var in cilium_agent_extra_env_vars %}
+ - {{ env_var | to_nice_yaml(indent=2) | indent(10) }}
+{% endfor %}
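+{# The loop above expects cilium_agent_extra_env_vars to be a list of env-var
+   mappings, e.g. [{name: GOMAXPROCS, value: "4"}] (illustrative values only) #}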
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - "/cni-install.sh"
+ - "--cni-exclusive={{ cilium_cni_exclusive | string | lower }}"
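+          # Jinja renders booleans as "True"/"False", so they are normalised
+          # to lowercase above (cni-install.sh presumably expects true/false)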
+{% if cilium_version | regex_replace('v') is version('1.12', '>=') %}
+ - "--enable-debug={{ cilium_debug | string | lower }}"
+ - "--log-file={{ cilium_cni_log_file }}"
+{% endif %}
+ preStop:
+ exec:
+ command:
+ - /cni-uninstall.sh
+ resources:
+ limits:
+ cpu: {{ cilium_cpu_limit }}
+ memory: {{ cilium_memory_limit }}
+ requests:
+ cpu: {{ cilium_cpu_requests }}
+ memory: {{ cilium_memory_requests }}
{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %}
ports:
{% endif %}
{% if cilium_enable_prometheus %}
- - containerPort: 9090
- hostPort: 9090
- name: prometheus
+ - name: prometheus
+ containerPort: {{ cilium_agent_scrape_port }}
+ hostPort: {{ cilium_agent_scrape_port }}
protocol: TCP
{% endif %}
{% if cilium_enable_hubble_metrics %}
- - containerPort: 9091
- hostPort: 9091
- name: hubble-metrics
+ - name: hubble-metrics
+ containerPort: {{ cilium_hubble_scrape_port }}
+ hostPort: {{ cilium_hubble_scrape_port }}
protocol: TCP
{% endif %}
securityContext:
privileged: true
volumeMounts:
- - mountPath: /sys/fs/bpf
- name: bpf-maps
+ - name: bpf-maps
+ mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
- - mountPath: /var/run/cilium
- name: cilium-run
- - mountPath: /host/opt/cni/bin
- name: cni-path
- - mountPath: /host/etc/cni/net.d
- name: etc-cni-netd
-# pkg/workloads was deprecated in 1.6, removed in 1.7
-# https://github.com/cilium/cilium/pull/9447
-{% if cilium_version | regex_replace('v') is version('1.7', '<') %}
-{% if container_manager == 'docker' %}
- - mountPath: /var/run/docker.sock
- name: docker-socket
- readOnly: true
-{% else %}
- - name: "{{ container_manager }}-socket"
- mountPath: {{ cri_socket }}
- readOnly: true
-{% endif %}
-{% endif %}
+ - name: cilium-run
+ mountPath: /var/run/cilium
+ - name: cni-path
+ mountPath: /host/opt/cni/bin
+ - name: etc-cni-netd
+ mountPath: /host/etc/cni/net.d
{% if cilium_identity_allocation_mode == "kvstore" %}
- - mountPath: /var/lib/etcd-config
- name: etcd-config-path
+ - name: etcd-config-path
+ mountPath: /var/lib/etcd-config
readOnly: true
- - mountPath: "{{cilium_cert_dir}}"
- name: etcd-secrets
+ - name: etcd-secrets
+ mountPath: "{{cilium_cert_dir}}"
readOnly: true
{% endif %}
- - mountPath: /var/lib/cilium/clustermesh
- name: clustermesh-secrets
+ - name: clustermesh-secrets
+ mountPath: /var/lib/cilium/clustermesh
readOnly: true
- - mountPath: /tmp/cilium/config-map
- name: cilium-config-path
+ - name: cilium-config-path
+ mountPath: /tmp/cilium/config-map
readOnly: true
{% if cilium_ip_masq_agent_enable %}
- name: ip-masq-agent
@@ -192,19 +173,19 @@ spec:
readOnly: true
{% endif %}
# Needed to be able to load kernel modules
- - mountPath: /lib/modules
- name: lib-modules
+ - name: lib-modules
+ mountPath: /lib/modules
readOnly: true
- - mountPath: /run/xtables.lock
- name: xtables-lock
+ - name: xtables-lock
+ mountPath: /run/xtables.lock
{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %}
- - mountPath: /etc/ipsec
- name: cilium-ipsec-secrets
+ - name: cilium-ipsec-secrets
+ mountPath: /etc/ipsec
readOnly: true
{% endif %}
{% if cilium_hubble_install %}
- - mountPath: /var/lib/cilium/tls/hubble
- name: hubble-tls
+ - name: hubble-tls
+ mountPath: /var/lib/cilium/tls/hubble
readOnly: true
{% endif %}
{% for volume_mount in cilium_agent_extra_volume_mounts %}
@@ -245,22 +226,50 @@ spec:
securityContext:
privileged: true
{% endif %}
- - command:
+{% if cilium_version | regex_replace('v') is version('1.11.7', '>=') %}
+ - name: apply-sysctl-overwrites
+ image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
+ imagePullPolicy: {{ k8s_image_pull_policy }}
+ env:
+ - name: BIN_PATH
+ value: /opt/cni/bin
+ command:
+ - sh
+ - -ec
+ # The statically linked Go program binary is invoked to avoid any
+ # dependency on utilities like sh that can be missing on certain
+ # distros installed on the underlying host. Copy the binary to the
+ # same directory where we install cilium cni plugin so that exec permissions
+ # are available.
+ - |
+ cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
+ nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
+ rm /hostbin/cilium-sysctlfix
+ volumeMounts:
+ - name: hostproc
+ mountPath: /hostproc
+ - name: cni-path
+ mountPath: /hostbin
+ securityContext:
+ privileged: true
+{% endif %}
+ - name: clean-cilium-state
+ image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
+ imagePullPolicy: {{ k8s_image_pull_policy }}
+ command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
- key: clean-cilium-state
name: cilium-config
+ key: clean-cilium-state
optional: true
- # CLEAN_CILIUM_BPF_STATE is deprecated in 1.6.
- # https://github.com/cilium/cilium/pull/7478
- - name: "{{ cilium_version | regex_replace('v') is version('1.6', '<')| ternary('CLEAN_CILIUM_BPF_STATE','CILIUM_BPF_STATE') }}"
+ - name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
- key: clean-cilium-bpf-state
name: cilium-config
+ key: clean-cilium-bpf-state
optional: true
# Removed in 1.11 and up.
# https://github.com/cilium/cilium/commit/f7a3f59fd74983c600bfce9cac364b76d20849d9
@@ -278,32 +287,25 @@ spec:
- name: KUBERNETES_SERVICE_PORT
value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
{% endif %}
-{% if cilium_version | regex_replace('v') is version('1.9', '<') %}
- image: "{{cilium_init_image_repo}}:{{cilium_init_image_tag}}"
-{% else %}
- image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
-{% endif %}
- imagePullPolicy: {{ k8s_image_pull_policy }}
- name: clean-cilium-state
securityContext:
privileged: true
volumeMounts:
- - mountPath: /sys/fs/bpf
- name: bpf-maps
+ - name: bpf-maps
+ mountPath: /sys/fs/bpf
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
# Required to mount cgroup filesystem from the host to cilium agent pod
- name: cilium-cgroup
mountPath: {{ cilium_cgroup_host_root }}
mountPropagation: HostToContainer
{% endif %}
- - mountPath: /var/run/cilium
- name: cilium-run
+ - name: cilium-run
+ mountPath: /var/run/cilium
resources:
requests:
cpu: 100m
memory: 100Mi
- priorityClassName: system-node-critical
restartPolicy: Always
+ priorityClassName: system-node-critical
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
@@ -312,36 +314,26 @@ spec:
{% if cilium_identity_allocation_mode == "kvstore" %}
dnsPolicy: ClusterFirstWithHostNet
{% endif %}
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium
tolerations:
- operator: Exists
volumes:
# To keep state between restarts / upgrades
- - hostPath:
+ - name: cilium-run
+ hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
- name: cilium-run
# To keep state between restarts / upgrades for bpf maps
- - hostPath:
+ - name: bpf-maps
+ hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
- name: bpf-maps
-# pkg/workloads was deprecated in 1.6, removed in 1.7
-# https://github.com/cilium/cilium/pull/9447
-{% if cilium_version | regex_replace('v') is version('1.7', '<') %}
-{% if container_manager == 'docker' %}
- # To read docker events from the node
- - hostPath:
- path: /var/run/docker.sock
- type: Socket
- name: docker-socket
-{% else %}
- # To read crio events from the node
- - hostPath:
- path: {{ cri_socket }}
- type: Socket
- name: {{ container_manager }}-socket
-{% endif %}
-{% endif %}
{% if cilium_version | regex_replace('v') is version('1.11', '>=') %}
# To mount cgroup2 filesystem on the host
- name: hostproc
@@ -355,34 +347,34 @@ spec:
type: DirectoryOrCreate
{% endif %}
# To install cilium cni plugin in the host
- - hostPath:
+ - name: cni-path
+ hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
- name: cni-path
# To install cilium cni configuration in the host
- - hostPath:
+ - name: etc-cni-netd
+ hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
- name: etc-cni-netd
# To be able to load kernel modules
- - hostPath:
+ - name: lib-modules
+ hostPath:
path: /lib/modules
- name: lib-modules
# To access iptables concurrently with other processes (e.g. kube-proxy)
- - hostPath:
+ - name: xtables-lock
+ hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: xtables-lock
{% if cilium_identity_allocation_mode == "kvstore" %}
# To read the etcd config stored in config maps
- - configMap:
+ - name: etcd-config-path
+ configMap:
+ name: cilium-config
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
items:
- key: etcd-config
path: etcd.config
- name: cilium-config
- name: etcd-config-path
# To read the k8s etcd secrets in case the user might want to use TLS
- name: etcd-secrets
hostPath:
@@ -391,21 +383,22 @@ spec:
# To read the clustermesh configuration
- name: clustermesh-secrets
secret:
+ secretName: cilium-clustermesh
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
optional: true
- secretName: cilium-clustermesh
# To read the configuration from the config map
- - configMap:
+ - name: cilium-config-path
+ configMap:
name: cilium-config
- name: cilium-config-path
{% if cilium_ip_masq_agent_enable %}
- - configMap:
+ - name: ip-masq-agent
+ configMap:
name: ip-masq-agent
+ optional: true
items:
- key: config
path: ip-masq-agent
- name: ip-masq-agent
{% endif %}
{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %}
- name: cilium-ipsec-secrets
@@ -420,21 +413,12 @@ spec:
sources:
- secret:
name: hubble-server-certs
+ optional: true
items:
+ - key: ca.crt
+ path: client-ca.crt
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
- optional: true
- - configMap:
- name: hubble-ca-cert
- items:
- - key: ca.crt
- path: client-ca.crt
- optional: true
{% endif %}
- updateStrategy:
- rollingUpdate:
- # Specifies the maximum number of Pods that can be unavailable during the update process.
- maxUnavailable: 2
- type: RollingUpdate
diff --git a/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2 b/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2
index 6486cfd93..dd97bbfc5 100644
--- a/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2
+++ b/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2
@@ -1,6 +1,6 @@
---
# Source: cilium/templates/hubble-generate-certs-cronjob.yaml
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: hubble-generate-certs
@@ -37,7 +37,7 @@ spec:
- "--hubble-ca-config-map-create=true"
- "--hubble-ca-config-map-name=hubble-ca-cert"
- "--hubble-server-cert-generate=true"
- - "--hubble-server-cert-common-name=*.default.hubble-grpc.cilium.io"
+ - "--hubble-server-cert-common-name=*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io"
- "--hubble-server-cert-validity-duration=94608000s"
- "--hubble-server-cert-secret-name=hubble-server-certs"
- "--hubble-relay-client-cert-generate=true"
diff --git a/roles/network_plugin/cilium/templates/hubble/job.yml.j2 b/roles/network_plugin/cilium/templates/hubble/job.yml.j2
index d4213bd39..38a42bfd0 100644
--- a/roles/network_plugin/cilium/templates/hubble/job.yml.j2
+++ b/roles/network_plugin/cilium/templates/hubble/job.yml.j2
@@ -33,7 +33,7 @@ spec:
- "--hubble-ca-config-map-create=true"
- "--hubble-ca-config-map-name=hubble-ca-cert"
- "--hubble-server-cert-generate=true"
- - "--hubble-server-cert-common-name=*.default.hubble-grpc.cilium.io"
+ - "--hubble-server-cert-common-name=*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io"
- "--hubble-server-cert-validity-duration=94608000s"
- "--hubble-server-cert-secret-name=hubble-server-certs"
- "--hubble-relay-client-cert-generate=true"
diff --git a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2
index bb55fd4da..7c73b095d 100644
--- a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2
+++ b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2
@@ -5,64 +5,11 @@ metadata:
name: flannel
namespace: kube-system
---
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
- name: psp.flannel.unprivileged
- annotations:
- seccomp.security.alpha.kubernetes.io/allowedProfileNames: runtime/default
- seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
-{% if podsecuritypolicy_enabled and apparmor_enabled %}
- apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
- apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
-{% endif %}
-spec:
- privileged: false
- volumes:
- - configMap
- - secret
- - emptyDir
- - hostPath
- allowedHostPaths:
- - pathPrefix: "/etc/cni/net.d"
- - pathPrefix: "/etc/kube-flannel"
- - pathPrefix: "/run/flannel"
- readOnlyRootFilesystem: false
- # Users and groups
- runAsUser:
- rule: RunAsAny
- supplementalGroups:
- rule: RunAsAny
- fsGroup:
- rule: RunAsAny
- # Privilege Escalation
- allowPrivilegeEscalation: false
- defaultAllowPrivilegeEscalation: false
- # Capabilities
- allowedCapabilities: ['NET_ADMIN']
- defaultAddCapabilities: []
- requiredDropCapabilities: []
- # Host namespaces
- hostPID: false
- hostIPC: false
- hostNetwork: true
- hostPorts:
- - min: 0
- max: 65535
- # SELinux
- seLinux:
- # SELinux is unused in CaaSP
- rule: 'RunAsAny'
----
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- - apiGroups: ['extensions']
- resources: ['podsecuritypolicies']
- verbs: ['use']
- resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
index 3fe3cab84..59cecb257 100644
--- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
+++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
@@ -79,7 +79,7 @@ spec:
securityContext:
privileged: false
capabilities:
- add: ["NET_ADMIN"]
+ add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
@@ -89,11 +89,15 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
+ - name: EVENT_QUEUE_DEPTH
+ value: "5000"
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
+ - name: xtables-lock
+ mountPath: /run/xtables.lock
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
@@ -146,6 +150,10 @@ spec:
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
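+      # To access iptables concurrently with other processes (e.g. kube-proxy)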
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
- name: cni-plugin
hostPath:
path: /opt/cni/bin
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index d1d0535c0..36b1e9ff8 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -2,7 +2,8 @@
- name: remove-node | Delete node
command: "{{ kubectl }} delete node {{ kube_override_hostname|default(inventory_hostname) }}"
delegate_to: "{{ groups['kube_control_plane']|first }}"
- when: inventory_hostname in groups['k8s_cluster']
+ # ignore servers that are not nodes
+ when: inventory_hostname in groups['k8s_cluster'] and kube_override_hostname|default(inventory_hostname) in nodes.stdout_lines
retries: "{{ delete_node_retries }}"
  # Sometimes the api-server can have a short window of unavailability when we delete a master node
delay: "{{ delete_node_delay_seconds }}"
diff --git a/scale.yml b/scale.yml
index 533b97727..8e79bfa03 100644
--- a/scale.yml
+++ b/scale.yml
@@ -27,7 +27,7 @@
import_playbook: facts.yml
- name: Generate the etcd certificates beforehand
- hosts: etcd
+ hosts: etcd:kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
@@ -38,7 +38,10 @@
vars:
etcd_cluster_setup: false
etcd_events_cluster_setup: false
- when: etcd_deployment_type != "kubeadm"
+ when:
+ - etcd_deployment_type != "kubeadm"
+ - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+ - kube_network_plugin != "calico" or calico_datastore == "etcd"
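+    # i.e. only when the chosen network plugin may talk to etcd directly;
+    # calico only does so when calico_datastore is "etcd"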
- name: Download images to ansible host cache via first kube_control_plane node
hosts: kube_control_plane[0]
@@ -60,7 +63,14 @@
- { role: kubernetes/preinstall, tags: preinstall }
- { role: container-engine, tags: "container-engine", when: deploy_container_engine }
- { role: download, tags: download, when: "not skip_downloads" }
- - { role: etcd, tags: etcd, etcd_cluster_setup: false, when: "etcd_deployment_type != 'kubeadm'" }
+ - role: etcd
+ tags: etcd
+ vars:
+ etcd_cluster_setup: false
+ when:
+ - etcd_deployment_type != "kubeadm"
+ - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+ - kube_network_plugin != "calico" or calico_datastore == "etcd"
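+        # same etcd-usage guard as in the certificate-generation play above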
- name: Target only workers to get kubelet installed and checking in on any new nodes(node)
hosts: kube_node
diff --git a/test-infra/image-builder/roles/kubevirt-images/defaults/main.yml b/test-infra/image-builder/roles/kubevirt-images/defaults/main.yml
index 5e73068c2..5b38495be 100644
--- a/test-infra/image-builder/roles/kubevirt-images/defaults/main.yml
+++ b/test-infra/image-builder/roles/kubevirt-images/defaults/main.yml
@@ -83,6 +83,13 @@ images:
converted: true
tag: "latest"
+ rockylinux-9:
+ filename: Rocky-9-GenericCloud-9.0-20220830.0.x86_64.qcow2
+ url: https://download.rockylinux.org/pub/rocky/9.0/images/x86_64/Rocky-9-GenericCloud-9.0-20220830.0.x86_64.qcow2
+ checksum: sha256:f02570e0ad3653df7f56baa8157739dbe92a003234acd5824dcf94d24694e20b
+ converted: true
+ tag: "latest"
+
debian-9:
filename: debian-9-openstack-amd64.qcow2
url: https://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
diff --git a/tests/cloud_playbooks/roles/packet-ci/defaults/main.yml b/tests/cloud_playbooks/roles/packet-ci/defaults/main.yml
index 2e6a46b2e..f2c82368e 100644
--- a/tests/cloud_playbooks/roles/packet-ci/defaults/main.yml
+++ b/tests/cloud_playbooks/roles/packet-ci/defaults/main.yml
@@ -27,6 +27,7 @@ cloud_init:
centos-8: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="
almalinux-8: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="
rockylinux-8: "I2Nsb3VkLWNvbmZpZwpwYWNrYWdlczoKIC0gc3VkbwogLSBob3N0bmFtZQpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="
+ rockylinux-9: "I2Nsb3VkLWNvbmZpZwpwYWNrYWdlczoKIC0gc3VkbwogLSBob3N0bmFtZQpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="
debian-9: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
debian-10: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
debian-11: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
diff --git a/tests/files/packet_almalinux8-calico.yml b/tests/files/packet_almalinux8-calico.yml
index f9ab1299d..1df4a64e5 100644
--- a/tests/files/packet_almalinux8-calico.yml
+++ b/tests/files/packet_almalinux8-calico.yml
@@ -12,6 +12,7 @@ loadbalancer_apiserver_type: haproxy
# NTP management
ntp_enabled: true
+ntp_timezone: Etc/UTC
ntp_manage_config: true
ntp_tinker_panic: true
ntp_force_sync_immediately: true
diff --git a/tests/files/packet_rockylinux9-calico.yml b/tests/files/packet_rockylinux9-calico.yml
new file mode 100644
index 000000000..17e6ae58b
--- /dev/null
+++ b/tests/files/packet_rockylinux9-calico.yml
@@ -0,0 +1,11 @@
+---
+# Instance settings
+cloud_image: rockylinux-9
+mode: default
+vm_memory: 3072Mi
+
+# Kubespray settings
+metrics_server_enabled: true
+dashboard_namespace: "kube-dashboard"
+dashboard_enabled: true
+loadbalancer_apiserver_type: haproxy
diff --git a/tests/files/packet_ubuntu20-calico-aio-hardening.yml b/tests/files/packet_ubuntu20-calico-aio-hardening.yml
new file mode 100644
index 000000000..c013f7954
--- /dev/null
+++ b/tests/files/packet_ubuntu20-calico-aio-hardening.yml
@@ -0,0 +1,96 @@
+---
+# Instance settings
+cloud_image: ubuntu-2004
+mode: aio
+
+# Kubespray settings
+auto_renew_certificates: true
+
+# Currently ipvs is not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
+kube_proxy_mode: iptables
+enable_nodelocaldns: False
+
+# The following settings are for hardening
+## kube-apiserver
+authorization_modes: ['Node', 'RBAC']
+# AppArmor-based OS
+kube_apiserver_feature_gates: ['AppArmor=true']
+kube_apiserver_request_timeout: 120s
+kube_apiserver_service_account_lookup: true
+
+# enable kubernetes audit
+kubernetes_audit: true
+audit_log_path: "/var/log/kube-apiserver-log.json"
+audit_log_maxage: 30
+audit_log_maxbackups: 10
+audit_log_maxsize: 100
+
+tls_min_version: VersionTLS12
+tls_cipher_suites:
+ - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
+ - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
+
+# enable encryption at rest
+kube_encrypt_secret_data: true
+kube_encryption_resources: [secrets]
+kube_encryption_algorithm: "secretbox"
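+# (secretbox = XSalsa20-Poly1305, one of the providers accepted by the
+# Kubernetes EncryptionConfiguration API)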
+
+kube_apiserver_enable_admission_plugins: ['EventRateLimit,AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity']
+kube_apiserver_admission_control_config_file: true
+# EventRateLimit plugin configuration
+kube_apiserver_admission_event_rate_limits:
+ limit_1:
+ type: Namespace
+ qps: 50
+ burst: 100
+ cache_size: 2000
+ limit_2:
+ type: User
+ qps: 50
+ burst: 100
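+# Sketch of the EventRateLimit admission configuration the settings above are
+# rendered into (exact template output may differ):
+#   apiVersion: eventratelimit.admission.k8s.io/v1alpha1
+#   kind: Configuration
+#   limits:
+#   - type: Namespace
+#     qps: 50
+#     burst: 100
+#     cacheSize: 2000
+#   - type: User
+#     qps: 50
+#     burst: 100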
+kube_profiling: false
+
+## kube-controller-manager
+kube_controller_manager_bind_address: 127.0.0.1
+kube_controller_terminated_pod_gc_threshold: 50
+# AppArmor-based OS
+kube_controller_feature_gates: ["RotateKubeletServerCertificate=true", "AppArmor=true"]
+
+## kube-scheduler
+kube_scheduler_bind_address: 127.0.0.1
+kube_kubeadm_scheduler_extra_args:
+ profiling: false
+# AppArmor-based OS
+kube_scheduler_feature_gates: ["AppArmor=true"]
+
+## etcd
+etcd_deployment_type: kubeadm
+
+## kubelet
+kubelet_authentication_token_webhook: true
+kube_read_only_port: 0
+kubelet_rotate_server_certificates: true
+kubelet_protect_kernel_defaults: true
+kubelet_event_record_qps: 1
+kubelet_rotate_certificates: true
+kubelet_streaming_connection_idle_timeout: "5m"
+kubelet_make_iptables_util_chains: true
+kubelet_feature_gates: ["RotateKubeletServerCertificate=true", "SeccompDefault=true"]
+kubelet_seccomp_default: true
+kubelet_systemd_hardening: true
+# If your control plane nodes have multiple interfaces,
+# kubelet_secure_addresses lets you specify the IP addresses
+# from which the kubelet will accept packets, e.g.:
+# kubelet_secure_addresses: "192.168.10.110 192.168.10.111 192.168.10.112"
+
+# additional configurations
+kube_owner: root
+kube_cert_group: root
+
+# create a default Pod Security configuration and deny insecure pods from running
+# kube-system namespace is exempted by default
+kube_pod_security_use_default: true
+kube_pod_security_default_enforce: restricted
diff --git a/tests/files/packet_ubuntu22-aio-docker.yml b/tests/files/packet_ubuntu22-aio-docker.yml
index c72ed7ffc..b78c6b0a4 100644
--- a/tests/files/packet_ubuntu22-aio-docker.yml
+++ b/tests/files/packet_ubuntu22-aio-docker.yml
@@ -15,4 +15,3 @@ enable_nodelocaldns: False
container_manager: docker
etcd_deployment_type: docker
resolvconf_mode: docker_dns
-cri_dockerd_enabled: true
diff --git a/tests/scripts/check_typo.sh b/tests/scripts/check_typo.sh
new file mode 100755
index 000000000..cdcf49bc7
--- /dev/null
+++ b/tests/scripts/check_typo.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+# cd to the root directory of kubespray
+cd "$(dirname "$0")/../.."
+
+# -f: do not fail when there is no leftover misspell file to remove
+rm -f ./misspell*
+
+set -e
+wget https://github.com/client9/misspell/releases/download/v0.3.4/misspell_0.3.4_linux_64bit.tar.gz
+tar -zxvf ./misspell_0.3.4_linux_64bit.tar.gz
+chmod 755 ./misspell
+git ls-files | xargs ./misspell -error
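+
+# Usage sketch (assumes a Linux x86_64 host with wget, tar and git on PATH):
+#   bash tests/scripts/check_typo.sh
+# misspell exits non-zero when it finds a typo, which fails the check.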
diff --git a/tests/scripts/testcases_run.sh b/tests/scripts/testcases_run.sh
index 5947309df..eac0afe72 100755
--- a/tests/scripts/testcases_run.sh
+++ b/tests/scripts/testcases_run.sh
@@ -47,6 +47,13 @@ if [[ "$CI_JOB_NAME" =~ "ubuntu" ]]; then
CI_TEST_ADDITIONAL_VARS="-e ansible_python_interpreter=/usr/bin/python3"
fi
+ENABLE_040_TEST="true"
+if [[ "$CI_JOB_NAME" =~ "hardening" ]]; then
+  # TODO: Remove this condition once an alternative to the netchecker
+  # container image is found, since netchecker does not work in hardened
+  # environments.
+ ENABLE_040_TEST="false"
+fi
+
# Check out latest tag if testing upgrade
test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout "$KUBESPRAY_VERSION"
# Checkout the CI vars file so it is available
@@ -85,7 +92,9 @@ ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIO
ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/030_check-network.yml $ANSIBLE_LOG_LEVEL
## Advanced DNS checks
-ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/040_check-network-adv.yml $ANSIBLE_LOG_LEVEL
+if [ "${ENABLE_040_TEST}" = "true" ]; then
+ ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/040_check-network-adv.yml $ANSIBLE_LOG_LEVEL
+fi
## Kubernetes conformance tests
ansible-playbook -i ${ANSIBLE_INVENTORY} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} --limit "all:!fake_hosts" tests/testcases/100_check-k8s-conformance.yml $ANSIBLE_LOG_LEVEL
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index c193db6de..499064d7f 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -1,8 +1,8 @@
---
- hosts: kube_control_plane[0]
vars:
- test_image_repo: registry.k8s.io/busybox
- test_image_tag: latest
+ test_image_repo: registry.k8s.io/e2e-test-images/agnhost
+ test_image_tag: "2.40"
tasks:
- name: Force binaries directory for Flatcar Container Linux by Kinvolk
@@ -53,12 +53,33 @@
retries: 5
delay: 5
- - name: Run 2 busybox pods in test ns
- command: "{{ bin_dir }}/kubectl run {{ item }} --image={{ test_image_repo }}:{{ test_image_tag }} --namespace test --command -- tail -f /dev/null"
+ - name: Run 2 agnhost pods in test ns
+ shell:
+ cmd: |
+ cat <