Merge branch 'kubernetes-sigs:master' into master

Vitaly Yakovenko 2022-10-24 18:17:00 +02:00 committed by GitHub
commit 4741cb1ee5
186 changed files with 2399 additions and 1521 deletions


@@ -8,7 +8,7 @@ stages:
   - deploy-special
 variables:
-  KUBESPRAY_VERSION: v2.19.0
+  KUBESPRAY_VERSION: v2.20.0
   FAILFASTCI_NAMESPACE: 'kargo-ci'
   GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
   ANSIBLE_FORCE_COLOR: "true"


@@ -75,6 +75,13 @@ check-readme-versions:
   script:
     - tests/scripts/check_readme_versions.sh
 
+check-typo:
+  stage: unit-tests
+  tags: [light]
+  image: python:3
+  script:
+    - tests/scripts/check_typo.sh
+
 ci-matrix:
   stage: unit-tests
   tags: [light]


@@ -51,6 +51,11 @@ packet_ubuntu20-aio-docker:
   extends: .packet_pr
   when: on_success
 
+packet_ubuntu20-calico-aio-hardening:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: on_success
+
 packet_ubuntu18-calico-aio:
   stage: deploy-part2
   extends: .packet_pr
@@ -151,6 +156,11 @@ packet_rockylinux8-calico:
   extends: .packet_pr
   when: on_success
 
+packet_rockylinux9-calico:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: on_success
+
 packet_almalinux8-docker:
   stage: deploy-part2
   extends: .packet_pr


@@ -8,6 +8,7 @@ aliases:
     - floryut
     - oomichi
     - cristicalin
+    - liupeng0518
   kubespray-reviewers:
     - holmsten
     - bozzo
@@ -16,6 +17,7 @@ aliases:
     - jayonlau
     - cristicalin
     - liupeng0518
+    - yankay
   kubespray-emeritus_approvers:
     - riverzhang
     - atoms


@@ -57,10 +57,10 @@ A simple way to ensure you get all the correct version of Ansible is to use the
 You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
 
 ```ShellSession
-docker pull quay.io/kubespray/kubespray:v2.19.0
+docker pull quay.io/kubespray/kubespray:v2.20.0
 docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
   --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.19.0 bash
+  quay.io/kubespray/kubespray:v2.20.0 bash
 # Inside the container you may now run the kubespray playbooks:
 ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
 ```
@@ -113,6 +113,7 @@ vagrant up
 - [Air-Gap installation](docs/offline-environment.md)
 - [NTP](docs/ntp.md)
 - [Hardening](docs/hardening.md)
+- [Mirror](docs/mirror.md)
 - [Roadmap](docs/roadmap.md)
 
 ## Supported Linux Distributions
@@ -120,13 +121,13 @@ vagrant up
 - **Flatcar Container Linux by Kinvolk**
 - **Debian** Bullseye, Buster, Jessie, Stretch
 - **Ubuntu** 16.04, 18.04, 20.04, 22.04
-- **CentOS/RHEL** 7, [8](docs/centos.md#centos-8)
+- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
 - **Fedora** 35, 36
 - **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
 - **openSUSE** Leap 15.x/Tumbleweed
-- **Oracle Linux** 7, [8](docs/centos.md#centos-8)
+- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
-- **Alma Linux** [8](docs/centos.md#centos-8)
+- **Alma Linux** [8, 9](docs/centos.md#centos-8)
-- **Rocky Linux** [8](docs/centos.md#centos-8)
+- **Rocky Linux** [8, 9](docs/centos.md#centos-8)
 - **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
 - **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
@@ -135,8 +136,8 @@ Note: Upstart/SysV init based OS types are not supported.
 ## Supported Components
 
 - Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.24.4
+  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.3
-  - [etcd](https://github.com/etcd-io/etcd) v3.5.4
+  - [etcd](https://github.com/etcd-io/etcd) v3.5.5
   - [docker](https://www.docker.com/) v20.10 (see note)
   - [containerd](https://containerd.io/) v1.6.8
   - [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
@@ -144,20 +145,20 @@ Note: Upstart/SysV init based OS types are not supported.
   - [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
   - [calico](https://github.com/projectcalico/calico) v3.23.3
   - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-  - [cilium](https://github.com/cilium/cilium) v1.11.7
+  - [cilium](https://github.com/cilium/cilium) v1.12.1
-  - [flannel](https://github.com/flannel-io/flannel) v0.18.1
+  - [flannel](https://github.com/flannel-io/flannel) v0.19.2
   - [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.7
   - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
   - [multus](https://github.com/intel/multus-cni) v3.8
   - [weave](https://github.com/weaveworks/weave) v2.8.1
   - [kube-vip](https://github.com/kube-vip/kube-vip) v0.4.2
 - Application
-  - [cert-manager](https://github.com/jetstack/cert-manager) v1.9.0
+  - [cert-manager](https://github.com/jetstack/cert-manager) v1.9.1
   - [coredns](https://github.com/coredns/coredns) v1.8.6
-  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.3.0
+  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.4.0
   - [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
-  - [argocd](https://argoproj.github.io/) v2.4.7
+  - [argocd](https://argoproj.github.io/) v2.4.15
-  - [helm](https://helm.sh/) v3.9.2
+  - [helm](https://helm.sh/) v3.9.4
   - [metallb](https://metallb.universe.tf/) v0.12.1
   - [registry](https://github.com/distribution/distribution) v2.8.1
 - Storage Plugin
@@ -177,7 +178,7 @@ Note: Upstart/SysV init based OS types are not supported.
 ## Requirements
 
-- **Minimum required version of Kubernetes is v1.22**
+- **Minimum required version of Kubernetes is v1.23**
 - **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
 - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
 - The target servers are configured to allow **IPv4 forwarding**.
@@ -246,6 +247,7 @@ See also [Network checker](docs/netcheck.md).
 - [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
 - [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
+- [Kubean](https://github.com/kubean-io/kubean)
 
 ## CI Tests


@@ -9,5 +9,7 @@
 #
 # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
 # INSTRUCTIONS AT https://kubernetes.io/security/
-atoms
 mattymo
+floryut
+oomichi
+cristicalin


@@ -35,7 +35,7 @@
     - { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
     - { role: download, tags: download, when: "not skip_downloads" }
 
-- hosts: etcd
+- hosts: etcd:kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -59,7 +59,10 @@
     vars:
       etcd_cluster_setup: false
       etcd_events_cluster_setup: false
-    when: etcd_deployment_type != "kubeadm"
+    when:
+      - etcd_deployment_type != "kubeadm"
+      - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+      - kube_network_plugin != "calico" or calico_datastore == "etcd"
 
 - hosts: k8s_cluster
   gather_facts: False


@@ -270,6 +270,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 |`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
 |`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
 |`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
+|`bastion_allowed_ports` | List of ports to open on bastion node, `[]` by default |
 |`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
 |`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
 |`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
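For illustration, a `cluster.tfvars` entry for the new `bastion_allowed_ports` variable could look like the sketch below. It reuses the port-list format documented above for `master_allowed_ports`; the port number is an assumption, not part of this change.

```
bastion_allowed_ports = [
  { "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0" },
]
```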
@@ -294,7 +295,8 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 
 Allows a custom definition of worker nodes giving the operator full control over individual node flavor and
 availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and
 `number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration
-using the `k8s_nodes` variable.
+using the `k8s_nodes` variable. The `az`, `flavor` and `floating_ip` parameters are mandatory.
+The optional parameter `extra_groups` (a comma-delimited string) can be used to define extra inventory group memberships for specific nodes.
 
 For example:
@@ -314,6 +316,7 @@ k8s_nodes = {
     "az"           = "sto3"
     "flavor"       = "83d8b44a-26a0-4f02-a981-079446926445"
     "floating_ip"  = true
+    "extra_groups" = "calico_rr"
   }
 }
 ```


@@ -84,6 +84,7 @@ module "compute" {
   supplementary_node_groups  = var.supplementary_node_groups
   master_allowed_ports       = var.master_allowed_ports
   worker_allowed_ports       = var.worker_allowed_ports
+  bastion_allowed_ports      = var.bastion_allowed_ports
   use_access_ip              = var.use_access_ip
   master_server_group_policy = var.master_server_group_policy
   node_server_group_policy   = var.node_server_group_policy
@@ -96,6 +97,7 @@ module "compute" {
   network_router_id          = module.network.router_id
   network_id                 = module.network.network_id
   use_existing_network       = var.use_existing_network
+  private_subnet_id          = module.network.subnet_id
 
   depends_on = [
     module.network.subnet_id


@@ -82,6 +82,17 @@ resource "openstack_networking_secgroup_rule_v2" "bastion" {
   security_group_id = openstack_networking_secgroup_v2.bastion[0].id
 }
 
+resource "openstack_networking_secgroup_rule_v2" "k8s_bastion_ports" {
+  count             = length(var.bastion_allowed_ports)
+  direction         = "ingress"
+  ethertype         = "IPv4"
+  protocol          = lookup(var.bastion_allowed_ports[count.index], "protocol", "tcp")
+  port_range_min    = lookup(var.bastion_allowed_ports[count.index], "port_range_min")
+  port_range_max    = lookup(var.bastion_allowed_ports[count.index], "port_range_max")
+  remote_ip_prefix  = lookup(var.bastion_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")
+  security_group_id = openstack_networking_secgroup_v2.bastion[0].id
+}
+
 resource "openstack_networking_secgroup_v2" "k8s" {
   name        = "${var.cluster_name}-k8s"
   description = "${var.cluster_name} - Kubernetes"
@@ -195,6 +206,9 @@ resource "openstack_networking_port_v2" "bastion_port" {
   port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
   security_group_ids    = var.port_security_enabled ? local.bastion_sec_groups : null
   no_security_groups    = var.port_security_enabled ? null : false
+  fixed_ip {
+    subnet_id = var.private_subnet_id
+  }
 
   depends_on = [
     var.network_router_id
@@ -245,6 +259,9 @@ resource "openstack_networking_port_v2" "k8s_master_port" {
   port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
   security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
   no_security_groups    = var.port_security_enabled ? null : false
+  fixed_ip {
+    subnet_id = var.private_subnet_id
+  }
 
   depends_on = [
     var.network_router_id
@@ -305,6 +322,9 @@ resource "openstack_networking_port_v2" "k8s_masters_port" {
   port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
   security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
   no_security_groups    = var.port_security_enabled ? null : false
+  fixed_ip {
+    subnet_id = var.private_subnet_id
+  }
 
   depends_on = [
     var.network_router_id
@@ -363,6 +383,9 @@ resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" {
   port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
   security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
   no_security_groups    = var.port_security_enabled ? null : false
+  fixed_ip {
+    subnet_id = var.private_subnet_id
+  }
 
   depends_on = [
     var.network_router_id
@@ -423,6 +446,9 @@ resource "openstack_networking_port_v2" "etcd_port" {
   port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
   security_group_ids    = var.port_security_enabled ? local.etcd_sec_groups : null
   no_security_groups    = var.port_security_enabled ? null : false
+  fixed_ip {
+    subnet_id = var.private_subnet_id
+  }
 
   depends_on = [
     var.network_router_id
@@ -477,6 +503,9 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" {
   port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
   security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
   no_security_groups    = var.port_security_enabled ? null : false
+  fixed_ip {
+    subnet_id = var.private_subnet_id
+  }
 
   depends_on = [
     var.network_router_id
@@ -531,6 +560,9 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port"
   port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
   security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
   no_security_groups    = var.port_security_enabled ? null : false
+  fixed_ip {
+    subnet_id = var.private_subnet_id
+  }
 
   depends_on = [
     var.network_router_id
@@ -586,6 +618,9 @@ resource "openstack_networking_port_v2" "k8s_node_port" {
   port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
   security_group_ids    = var.port_security_enabled ? local.worker_sec_groups : null
   no_security_groups    = var.port_security_enabled ? null : false
+  fixed_ip {
+    subnet_id = var.private_subnet_id
+  }
 
   depends_on = [
     var.network_router_id
@@ -646,6 +681,9 @@ resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" {
   port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
   security_group_ids    = var.port_security_enabled ? local.worker_sec_groups : null
   no_security_groups    = var.port_security_enabled ? null : false
+  fixed_ip {
+    subnet_id = var.private_subnet_id
+  }
 
   depends_on = [
     var.network_router_id
@@ -701,6 +739,9 @@ resource "openstack_networking_port_v2" "k8s_nodes_port" {
   port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
   security_group_ids    = var.port_security_enabled ? local.worker_sec_groups : null
   no_security_groups    = var.port_security_enabled ? null : false
+  fixed_ip {
+    subnet_id = var.private_subnet_id
+  }
 
   depends_on = [
     var.network_router_id
@@ -742,7 +783,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
+    kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups},${try(each.value.extra_groups, "")}"
     depends_on       = var.network_router_id
     use_access_ip    = var.use_access_ip
   }
@@ -760,6 +801,9 @@ resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" {
   port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
   security_group_ids    = var.port_security_enabled ? local.gfs_sec_groups : null
   no_security_groups    = var.port_security_enabled ? null : false
+  fixed_ip {
+    subnet_id = var.private_subnet_id
+  }
 
   depends_on = [
     var.network_router_id


@@ -136,6 +136,10 @@ variable "worker_allowed_ports" {
   type = list
 }
 
+variable "bastion_allowed_ports" {
+  type = list
+}
+
 variable "use_access_ip" {}
 
 variable "master_server_group_policy" {
@@ -185,3 +189,7 @@ variable "port_security_enabled" {
 variable "force_null_port_security" {
   type = bool
 }
+
+variable "private_subnet_id" {
+  type = string
+}


@@ -257,6 +257,12 @@ variable "worker_allowed_ports" {
   ]
 }
 
+variable "bastion_allowed_ports" {
+  type    = list(any)
+  default = []
+}
+
 variable "use_access_ip" {
   default = 1
 }


@@ -281,7 +281,7 @@ For more information about Ansible and bastion hosts, read
 
 ## Mitogen
 
-Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for useage and reasons for deprecation.
+Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for usage and reasons for deprecation.
 
 ## Beyond ansible 2.9


@@ -72,9 +72,14 @@ calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
 In some cases you may want to route the pods subnet and so NAT is not needed on the nodes.
 For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located.
-The following variables need to be set:
-`peer_with_router` to enable the peering with the datacenter's border router (default value: false).
-you'll need to edit the inventory and add a hostvar `local_as` by node.
+The following variables need to be set as follows:
+
+```yml
+peer_with_router: true  # enable the peering with the datacenter's border router (default value: false)
+nat_outgoing: false     # (optional) NAT outgoing (default value: true)
+```
+
+And you'll need to edit the inventory and add a hostvar `local_as` by node.
 
 ```ShellSession
 node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
@@ -171,7 +176,7 @@ node5
 
 [rack0:vars]
 cluster_id="1.0.0.1"
-calcio_rr_id=rr1
+calico_rr_id=rr1
 calico_group_id=rr1
 ```
@@ -200,6 +205,14 @@ To re-define health host please set the following variable in your inventory:
 calico_healthhost: "0.0.0.0"
 ```
 
+### Optional : Configure VXLAN hardware Offload
+
+Because of the issue [projectcalico/calico#4727](https://github.com/projectcalico/calico/issues/4727), VXLAN offload is disabled by default. It can be configured like this:
+
+```yml
+calico_feature_detect_override: "ChecksumOffloadBroken=true"  # VXLAN offload will be enabled when the kernel version is > 5.7 (it may cause problems with buggy NIC drivers)
+```
+
 ### Optional : Configure Calico Node probe timeouts
 
 Under certain conditions a deployer may need to tune the Calico liveness and readiness probes timeout settings. These can be configured like this:


@@ -2,12 +2,12 @@
 
 ## CentOS 7
 
-The maximum python version offically supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above.
+The maximum python version officially supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above.
 Kubespray supports multiple ansible versions but only the default (5.x) gets wide testing coverage. If your deployment host is CentOS 7 it is recommended to use one of the earlier versions still supported.
 
 ## CentOS 8
 
-CentOS 8 / Oracle Linux 8 / AlmaLinux 8 / Rocky Linux 8 ship only with iptables-nft (ie without iptables-legacy similar to RHEL8)
+CentOS 8 / Oracle Linux 8,9 / AlmaLinux 8,9 / Rocky Linux 8,9 ship only with iptables-nft (ie without iptables-legacy similar to RHEL8)
 The only tested configuration for now is using Calico CNI
 You need to add `calico_iptables_backend: "NFT"` to your configuration.


@@ -16,6 +16,7 @@ fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x
 fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
 opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+rockylinux9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
 ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
 ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
@@ -35,6 +36,7 @@ fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@@ -54,6 +56,7 @@ fedora35 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
 opensuse | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
 rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
 ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |


@@ -56,7 +56,7 @@ cilium_operator_extra_volume_mounts:
 
 ## Choose Cilium version
 
 ```yml
-cilium_version: v1.11.3
+cilium_version: v1.12.1
 ```
 
 ## Add variable to config


@@ -39,4 +39,65 @@ containerd_registries:
 image_command_tool: crictl
 ```
 
+### Containerd Runtimes
+
+Containerd supports multiple runtime configurations that can be used with
+[RuntimeClass] Kubernetes feature. See [runtime classes in containerd] for the
+details of containerd configuration.
+
+In kubespray, the default runtime name is "runc", and it can be configured with the `containerd_runc_runtime` dictionary:
+
+```yaml
+containerd_runc_runtime:
+  name: runc
+  type: "io.containerd.runc.v2"
+  engine: ""
+  root: ""
+  options:
+    systemdCgroup: "false"
+    binaryName: /usr/local/bin/my-runc
+  base_runtime_spec: cri-base.json
+```
+
+Further runtimes can be configured with `containerd_additional_runtimes`, which
+is a list of such dictionaries.
+
+Default runtime can be changed by setting `containerd_default_runtime`.
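For illustration, a sketch of how these two variables could be combined in inventory group_vars. The field names follow the `containerd_runc_runtime` dictionary shown above; the `crun` runtime name and its binary path are assumptions used only as an example.

```yaml
# Illustrative only: register an additional runtime and make it the default.
containerd_additional_runtimes:
  - name: crun                          # assumed runtime name
    type: "io.containerd.runc.v2"
    engine: ""
    root: ""
    options:
      binaryName: /usr/local/bin/crun   # assumed binary path

containerd_default_runtime: crun
```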
+#### base_runtime_spec
+
+`base_runtime_spec` key in a runtime dictionary can be used to explicitly
+specify a runtime spec json file. We ship the default one which is generated
+with `ctr oci spec > /etc/containerd/cri-base.json`. It will be used if you set
+`base_runtime_spec: cri-base.json`. The main advantage of doing so is the presence of
+the `rlimits` section in this configuration, which will restrict the maximum number
+of file descriptors (open files) per container to 1024.
+
+You can tune many more [settings][runtime-spec] by supplying your own file name and content with `containerd_base_runtime_specs`:
+
+```yaml
+containerd_base_runtime_specs:
+  cri-spec-custom.json: |
+    {
+      "ociVersion": "1.0.2-dev",
+      "process": {
+        "user": {
+          "uid": 0,
+    ...
+```
+
+The files in this dict will be placed in the containerd config directory,
+`/etc/containerd` by default. The files can then be referenced by filename in a
+runtime:
+
+```yaml
+containerd_runc_runtime:
+  name: runc
+  base_runtime_spec: cri-spec-custom.json
+  ...
+```
+
 [containerd]: https://containerd.io/
+[RuntimeClass]: https://kubernetes.io/docs/concepts/containers/runtime-class/
+[runtime classes in containerd]: https://github.com/containerd/containerd/blob/main/docs/cri/config.md#runtime-classes
+[runtime-spec]: https://github.com/opencontainers/runtime-spec


@@ -19,6 +19,14 @@ ndots value to be used in ``/etc/resolv.conf``
 It is important to note that multiple search domains combined with high ``ndots``
 values lead to poor performance of DNS stack, so please choose it wisely.
 
+## dns_timeout
+
+timeout value to be used in ``/etc/resolv.conf``
+
+## dns_attempts
+
+attempts value to be used in ``/etc/resolv.conf``
+
 ### searchdomains
 
 Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
@@ -26,6 +34,8 @@ Custom search domains to be added in addition to the cluster search domains (``d
 Most Linux systems limit the total number of search domains to 6 and the total length of all search domains
 to 256 characters. Depending on the length of ``dns_domain``, you're limited to less than the total limit.
 
+`remove_default_searchdomains: true` will remove the default cluster search domains.
+
 Please note that ``resolvconf_mode: docker_dns`` will automatically add your systems search domains as
 additional search domains. Please take this into the accounts for the limits.
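For reference, a minimal group_vars sketch combining the variables described in this section; the values and the extra search domain are illustrative assumptions.

```yaml
dns_timeout: 2                        # timeout value written to /etc/resolv.conf (illustrative)
dns_attempts: 2                       # attempts value written to /etc/resolv.conf (illustrative)
searchdomains:
  - corp.example.com                  # assumed additional search domain
remove_default_searchdomains: true    # drop the default cluster search domains
```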
@@ -62,6 +72,13 @@ coredns_external_zones:
   nameservers:
   - 192.168.0.53
   cache: 0
+- zones:
+  - mydomain.tld
+  nameservers:
+  - 10.233.0.3
+  cache: 5
+  rewrite:
+  - name stop website.tld website.namespace.svc.cluster.local
 ```
 
 or as INI
@@ -263,7 +280,8 @@ nodelocaldns_secondary_skew_seconds: 5
 
 * the ``searchdomains`` have a limitation of a 6 names and 256 chars
   length. Due to default ``svc, default.svc`` subdomains, the actual
-  limits are a 4 names and 239 chars respectively.
+  limits are a 4 names and 239 chars respectively. If `remove_default_searchdomains: true`
+  is set, you are back to 6 names.
 * the ``nameservers`` have a limitation of a 3 servers, although there
   is a way to mitigate that with the ``upstream_dns_servers``,


@@ -17,9 +17,9 @@ The **kubernetes** version should be at least `v1.23.6` to have all the most rec
 ---
 
 ## kube-apiserver
-authorization_modes: ['Node','RBAC']
+authorization_modes: ['Node', 'RBAC']
 # AppArmor-based OS
-#kube_apiserver_feature_gates: ['AppArmor=true']
+# kube_apiserver_feature_gates: ['AppArmor=true']
 kube_apiserver_request_timeout: 120s
 kube_apiserver_service_account_lookup: true
@@ -60,7 +60,7 @@ kube_profiling: false
 kube_controller_manager_bind_address: 127.0.0.1
 kube_controller_terminated_pod_gc_threshold: 50
 # AppArmor-based OS
-#kube_controller_feature_gates: ["RotateKubeletServerCertificate=true","AppArmor=true"]
+# kube_controller_feature_gates: ["RotateKubeletServerCertificate=true", "AppArmor=true"]
 kube_controller_feature_gates: ["RotateKubeletServerCertificate=true"]
 
 ## kube-scheduler
@@ -68,13 +68,12 @@ kube_scheduler_bind_address: 127.0.0.1
 kube_kubeadm_scheduler_extra_args:
   profiling: false
 # AppArmor-based OS
-#kube_scheduler_feature_gates: ["AppArmor=true"]
+# kube_scheduler_feature_gates: ["AppArmor=true"]
 
 ## etcd
 etcd_deployment_type: kubeadm
 
 ## kubelet
-kubelet_authorization_mode_webhook: true
 kubelet_authentication_token_webhook: true
 kube_read_only_port: 0
 kubelet_rotate_server_certificates: true
@@ -83,8 +82,15 @@ kubelet_event_record_qps: 1
 kubelet_rotate_certificates: true
 kubelet_streaming_connection_idle_timeout: "5m"
 kubelet_make_iptables_util_chains: true
-kubelet_feature_gates: ["RotateKubeletServerCertificate=true","SeccompDefault=true"]
+kubelet_feature_gates: ["RotateKubeletServerCertificate=true", "SeccompDefault=true"]
 kubelet_seccomp_default: true
+kubelet_systemd_hardening: true
+# In case you have multiple interfaces in your
+# control plane nodes and you want to specify the right
+# IP addresses, kubelet_secure_addresses allows you
+# to specify the IP from which the kubelet
+# will receive the packets.
+kubelet_secure_addresses: "192.168.10.110 192.168.10.111 192.168.10.112"
 
 # additional configurations
 kube_owner: root
@@ -103,6 +109,8 @@ Let's take a deep look to the resultant **kubernetes** configuration:
 * The `encryption-provider-config` provide encryption at rest. This means that the `kube-apiserver` encrypt data that is going to be stored before they reach `etcd`. So the data is completely unreadable from `etcd` (in case an attacker is able to exploit this).
 * The `rotateCertificates` in `KubeletConfiguration` is set to `true` along with `serverTLSBootstrap`. This could be used in alternative to `tlsCertFile` and `tlsPrivateKeyFile` parameters. Additionally it automatically generates certificates by itself, but you need to manually approve them or at least using an operator to do this (for more details, please take a look here: <https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/>).
 * If you are installing **kubernetes** in an AppArmor-based OS (eg. Debian/Ubuntu) you can enable the `AppArmor` feature gate uncommenting the lines with the comment `# AppArmor-based OS` on top.
+* The `kubelet_systemd_hardening`, together with `kubelet_secure_addresses`, sets up a minimal firewall on the system. To better understand how these variables work, here's an explanatory image:
+  ![kubelet hardening](img/kubelet-hardening.png)
 
 Once you have the file properly filled, you can run the **Ansible** command to start the installation:

(Binary image file added, 1.5 MiB, not shown.)


@@ -2,7 +2,7 @@
 
 MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation.
 It allows you to create Kubernetes services of type "LoadBalancer" in clusters that don't run on a cloud provider, and thus cannot simply hook into 3rd party products to provide load-balancers.
-The default operationg mode of MetalLB is in ["Layer2"](https://metallb.universe.tf/concepts/layer2/) but it can also operate in ["BGP"](https://metallb.universe.tf/concepts/bgp/) mode.
+The default operating mode of MetalLB is in ["Layer2"](https://metallb.universe.tf/concepts/layer2/) but it can also operate in ["BGP"](https://metallb.universe.tf/concepts/bgp/) mode.
 
 ## Prerequisites
@@ -70,7 +70,7 @@ metallb_peers:
 
 When using calico >= 3.18 you can replace MetalLB speaker by calico Service LoadBalancer IP advertisement.
 See [calico service IPs advertisement documentation](https://docs.projectcalico.org/archive/v3.18/networking/advertise-service-ips#advertise-service-load-balancer-ip-addresses).
-In this scenarion you should disable the MetalLB speaker and configure the `calico_advertise_service_loadbalancer_ips` to match your `metallb_ip_range`
+In this scenario you should disable the MetalLB speaker and configure the `calico_advertise_service_loadbalancer_ips` to match your `metallb_ip_range`
 
 ```yaml
 metallb_speaker_enabled: false

docs/mirror.md (new file, 66 lines)

@@ -0,0 +1,66 @@
# Public Download Mirror

The public mirror is useful for downloading public resources quickly in some areas of the world (such as China).

## Configuring Kubespray to use a mirror site

You can follow the [offline environment](offline-environment.md) documentation to point the image/file download configuration at the public mirror site. If you want to download quickly in China, the configuration can look like this:

```shell
gcr_image_repo: "gcr.m.daocloud.io"
kube_image_repo: "k8s.m.daocloud.io"
docker_image_repo: "docker.m.daocloud.io"
quay_image_repo: "quay.m.daocloud.io"
github_image_repo: "ghcr.m.daocloud.io"
files_repo: "https://files.m.daocloud.io"
```

Use mirror sites only if you trust the provider. The Kubespray team cannot verify their reliability or security.
You can replace `m.daocloud.io` with any site you want.

## Example Usage Full Steps

You can follow the full steps below to use Kubespray with a mirror. For example:

Install Ansible according to the Ansible installation guide, then run the following steps:

```shell
# Copy ``inventory/sample`` as ``inventory/mycluster``
cp -rfp inventory/sample inventory/mycluster

# Update Ansible inventory file with inventory builder
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}

# Use the download mirror
cp inventory/mycluster/group_vars/all/offline.yml inventory/mycluster/group_vars/all/mirror.yml
sed -i -E '/# .*\{\{ files_repo/s/^# //g' inventory/mycluster/group_vars/all/mirror.yml
tee -a inventory/mycluster/group_vars/all/mirror.yml <<EOF
gcr_image_repo: "gcr.m.daocloud.io"
kube_image_repo: "k8s.m.daocloud.io"
docker_image_repo: "docker.m.daocloud.io"
quay_image_repo: "quay.m.daocloud.io"
github_image_repo: "ghcr.m.daocloud.io"
files_repo: "https://files.m.daocloud.io"
EOF

# Review and change parameters under ``inventory/mycluster/group_vars``
cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/,
# installing packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```

The above steps add the "Use the download mirror" step to the [README.md](../README.md) steps.

## Community-run mirror sites

DaoCloud (China)

* [image-mirror](https://github.com/DaoCloud/public-image-mirror)
* [files-mirror](https://github.com/DaoCloud/public-binary-files-mirror)


@@ -12,7 +12,7 @@ ntp_enabled: true
 
 The NTP service would be enabled and sync time automatically.
 
-## Custimize the NTP configure file
+## Customize the NTP configure file
 
 In the Air-Gap environment, the node cannot access the NTP server by internet. So the node can use the customized ntp server by configuring ntp file.
@@ -26,6 +26,15 @@ ntp_servers:
   - "3.your-ntp-server.org iburst"
 ```
 
+## Setting the TimeZone
+
+The timezone can also be set with `ntp_timezone`, e.g. "Etc/UTC" or "Asia/Shanghai". If not set, the timezone will not change.
+
+```ShellSession
+ntp_enabled: true
+ntp_timezone: Etc/UTC
+```
+
 ## Advanced Configure
 
 Enable `tinker panic` is useful when running NTP in a VM environment to avoiding clock drift on VMs. It only takes effect when ntp_manage_config is true.
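A minimal sketch of enabling that option; this assumes the variable is named `ntp_tinker_panic` in the NTP role defaults, which is not shown in this diff.

```yml
ntp_enabled: true
ntp_manage_config: true   # tinker panic only takes effect when this is true
ntp_tinker_panic: true    # assumed variable name for the `tinker panic` option above
```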


@@ -23,7 +23,7 @@ kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
 kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
 kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"
 # etcd is optional if you **DON'T** use etcd_deployment=host
-etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
+etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
 cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
 crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
 # If using Calico


@@ -34,52 +34,6 @@ Otherwise [cinder](https://wiki.openstack.org/wiki/Cinder) won't work as expecte
 Unless you are using calico or kube-router you can now run the playbook.
 
-**Additional step needed when using calico or kube-router:**
-
-Being L3 CNI, calico and kube-router do not encapsulate all packages with the hosts' ip addresses. Instead the packets will be routed with the PODs ip addresses directly.
-
-OpenStack will filter and drop all packets from ips it does not know to prevent spoofing.
-
-In order to make L3 CNIs work on OpenStack you will need to tell OpenStack to allow pods packets by allowing the network they use.
-
-First you will need the ids of your OpenStack instances that will run kubernetes:
-
-```bash
-openstack server list --project YOUR_PROJECT
-+--------------------------------------+--------+----------------------------------+--------+-------------+
-| ID                                   | Name   | Tenant ID                        | Status | Power State |
-+--------------------------------------+--------+----------------------------------+--------+-------------+
-| e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |
-| 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |
-```
-
-Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (though they are now configured through using OpenStack):
-
-```bash
-openstack port list -c id -c device_id --project YOUR_PROJECT
-+--------------------------------------+--------------------------------------+
-| id                                   | device_id                            |
-+--------------------------------------+--------------------------------------+
-| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
-| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
-```
-
-Given the port ids on the left, you can set the two `allowed-address`(es) in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`.)
-
-```bash
-# allow kube_service_addresses and kube_pods_subnet network
-openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
-openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
-```
-
-If all the VMs in the tenant correspond to Kubespray deployment, you can "sweep run" above with:
-
-```bash
-openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
-```
-
-Now you can finally run the playbook.
-
 ## The external cloud provider
 
 The in-tree cloud provider is deprecated and will be removed in a future version of Kubernetes. The target release for removing all remaining in-tree cloud providers is set to 1.21.
@@ -156,3 +110,49 @@ The new cloud provider is configured to have Octavia by default in Kubespray.
 
 - Run `source path/to/your/openstack-rc` to read your OpenStack credentials like `OS_AUTH_URL`, `OS_USERNAME`, `OS_PASSWORD`, etc. Those variables are used for accessing OpenStack from the external cloud provider.
 - Run the `cluster.yml` playbook
+
+## Additional step needed when using calico or kube-router
+
+Being L3 CNI, calico and kube-router do not encapsulate all packages with the hosts' ip addresses. Instead the packets will be routed with the PODs ip addresses directly.
+
+OpenStack will filter and drop all packets from ips it does not know to prevent spoofing.
+
+In order to make L3 CNIs work on OpenStack you will need to tell OpenStack to allow pods packets by allowing the network they use.
+
+First you will need the ids of your OpenStack instances that will run kubernetes:
+
+```bash
+openstack server list --project YOUR_PROJECT
++--------------------------------------+--------+----------------------------------+--------+-------------+
+| ID                                   | Name   | Tenant ID                        | Status | Power State |
++--------------------------------------+--------+----------------------------------+--------+-------------+
+| e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |
+| 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |
+```
+
+Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (though they are now configured through using OpenStack):
+
+```bash
+openstack port list -c id -c device_id --project YOUR_PROJECT
++--------------------------------------+--------------------------------------+
+| id                                   | device_id                            |
++--------------------------------------+--------------------------------------+
+| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
+| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
+```
+
+Given the port ids on the left, you can set the two `allowed-address`(es) in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`.)
+
+```bash
+# allow kube_service_addresses and kube_pods_subnet network
+openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
+openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
+```
+
+If all the VMs in the tenant correspond to Kubespray deployment, you can "sweep run" above with:
+
+```bash
+openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
+```
+
+Now you can finally run the playbook.


@@ -14,8 +14,8 @@ hands-on guide to get started with Kubespray.
 
 ## Cluster Details
 
-* [kubespray](https://github.com/kubernetes-sigs/kubespray) v2.17.x
+* [kubespray](https://github.com/kubernetes-sigs/kubespray)
-* [kubernetes](https://github.com/kubernetes/kubernetes) v1.17.9
+* [kubernetes](https://github.com/kubernetes/kubernetes)
 
 ## Prerequisites


@ -28,6 +28,7 @@ Some variables of note include:
* *kube_proxy_mode* - Changes k8s proxy mode to iptables mode * *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
* *kube_version* - Specify a given Kubernetes version * *kube_version* - Specify a given Kubernetes version
* *searchdomains* - Array of DNS domains to search when looking up hostnames * *searchdomains* - Array of DNS domains to search when looking up hostnames
* *remove_default_searchdomains* - Boolean that removes the default cluster search domains
* *nameservers* - Array of nameservers to use for DNS lookup * *nameservers* - Array of nameservers to use for DNS lookup
* *preinstall_selinux_state* - Set selinux state, permitted values are permissive, enforcing and disabled. * *preinstall_selinux_state* - Set selinux state, permitted values are permissive, enforcing and disabled.
@ -166,6 +167,7 @@ variables to match your requirements.
addition to Kubespray deployed DNS addition to Kubespray deployed DNS
* *nameservers* - Array of DNS servers configured for use by hosts * *nameservers* - Array of DNS servers configured for use by hosts
* *searchdomains* - Array of up to 4 search domains * *searchdomains* - Array of up to 4 search domains
* *remove_default_searchdomains* - Boolean. If enabled, the `searchdomains` variable can hold up to 6 search domains (see the sketch below).
* *dns_etchosts* - Content of hosts file for coredns and nodelocaldns * *dns_etchosts* - Content of hosts file for coredns and nodelocaldns
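As an illustrative sketch (hypothetical addresses and domains, not defaults), these DNS-related variables could be combined in group_vars like this:
```yaml
# group_vars sketch (illustrative values)
nameservers:
  - 192.168.0.53                      # hypothetical resolver for the hosts
searchdomains:
  - corp.example.com                  # hypothetical search domain
remove_default_searchdomains: true    # frees the slots normally used by the cluster defaults
dns_etchosts: |
  10.0.0.5 registry.example.com
```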
For more information, see [DNS For more information, see [DNS
@ -175,25 +177,46 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
* *docker_options* - Commonly used to set * *docker_options* - Commonly used to set
``--insecure-registry=myregistry.mydomain:5000`` ``--insecure-registry=myregistry.mydomain:5000``
* *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install. * *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install.
* *containerd_default_runtime* - If defined, changes the default Containerd runtime used by the Kubernetes CRI plugin. * *containerd_default_runtime* - If defined, changes the default Containerd runtime used by the Kubernetes CRI plugin.
* *containerd_additional_runtimes* - Sets the additional Containerd runtimes used by the Kubernetes CRI plugin. * *containerd_additional_runtimes* - Sets the additional Containerd runtimes used by the Kubernetes CRI plugin.
[Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overriden in inventory vars. [Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overridden in inventory vars.
* *http_proxy/https_proxy/no_proxy/no_proxy_exclude_workers/additional_no_proxy* - Proxy variables for deploying behind a * *http_proxy/https_proxy/no_proxy/no_proxy_exclude_workers/additional_no_proxy* - Proxy variables for deploying behind a
proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
that correspond to each node. that correspond to each node.
* *kubelet_cgroup_driver* - Allows manual override of the cgroup-driver option for Kubelet. * *kubelet_cgroup_driver* - Allows manual override of the cgroup-driver option for Kubelet.
By default autodetection is used to match container manager configuration. By default autodetection is used to match container manager configuration.
`systemd` is the preferred driver for `containerd` though it can have issues with `cgroups v1` and `kata-containers` in which case you may want to change to `cgroupfs`. `systemd` is the preferred driver for `containerd` though it can have issues with `cgroups v1` and `kata-containers` in which case you may want to change to `cgroupfs`.
* *kubelet_rotate_certificates* - Auto rotate the kubelet client certificates by requesting new certificates * *kubelet_rotate_certificates* - Auto rotate the kubelet client certificates by requesting new certificates
from the kube-apiserver when the certificate expiration approaches. from the kube-apiserver when the certificate expiration approaches.
* *kubelet_rotate_server_certificates* - Auto rotate the kubelet server certificates by requesting new certificates * *kubelet_rotate_server_certificates* - Auto rotate the kubelet server certificates by requesting new certificates
from the kube-apiserver when the certificate expiration approaches. from the kube-apiserver when the certificate expiration approaches.
**Note** that server certificates are **not** approved automatically. Approve them manually **Note** that server certificates are **not** approved automatically. Approve them manually
(`kubectl get csr`, `kubectl certificate approve`) or implement custom approving controller like (`kubectl get csr`, `kubectl certificate approve`) or implement custom approving controller like
[kubelet-rubber-stamp](https://github.com/kontena/kubelet-rubber-stamp). [kubelet-rubber-stamp](https://github.com/kontena/kubelet-rubber-stamp).
* *kubelet_streaming_connection_idle_timeout* - Set the maximum time a streaming connection can be idle before the connection is automatically closed. * *kubelet_streaming_connection_idle_timeout* - Set the maximum time a streaming connection can be idle before the connection is automatically closed.
* *kubelet_make_iptables_util_chains* - If `true`, causes the kubelet ensures a set of `iptables` rules are present on host. * *kubelet_make_iptables_util_chains* - If `true`, causes the kubelet ensures a set of `iptables` rules are present on host.
* *kubelet_systemd_hardening* - If `true`, provides the kubelet systemd service with security features for isolation.
**N.B.** To enable this feature, ensure your system uses **`cgroup v2`**. Check with the command `sudo ls -l /sys/fs/cgroup/*.slice`; if the directory does not exist, enable cgroup v2 by following this guide: [enable cgroup v2](https://rootlesscontaine.rs/getting-started/common/cgroup2/#enabling-cgroup-v2).
* *kubelet_secure_addresses* - By default *kubelet_systemd_hardening* sets the **control plane** `ansible_host` IPs as the `kubelet_secure_addresses`. If your control plane nodes have multiple interfaces and the `kube-apiserver` is not bound to the default one, you can override them with this variable (see the group_vars sketch after this list).
Example:
The **control plane** node may have 2 interfaces with the following IP addresses: `eth0:10.0.0.110`, `eth1:192.168.1.110`.
By default `kubelet_secure_addresses` is set to `10.0.0.110`, since the Ansible control host uses `eth0` to connect to the machine. If you instead want `kube-apiserver` to reach the kubelets over `eth1`, override the variable like this: `kubelet_secure_addresses: "192.168.1.110"`.
* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter. * *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
For example, labels can be set in the inventory as variables or more widely in group_vars. For example, labels can be set in the inventory as variables or more widely in group_vars.
*node_labels* can only be defined as a dict: *node_labels* can only be defined as a dict:
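For instance, a minimal group_vars sketch (illustrative label keys and addresses, not defaults) combining a *node_labels* dict with the kubelet hardening options described above might look like:
```yaml
# group_vars/k8s_cluster/k8s-cluster.yml (illustrative sketch)
node_labels:
  environment: staging        # hypothetical label
  disktype: ssd               # hypothetical label

kubelet_systemd_hardening: true
# only needed when kube-apiserver reaches the kubelets over a non-default interface
kubelet_secure_addresses: "192.168.1.110"
```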

View file

@ -37,6 +37,7 @@ You need to source the vSphere credentials you use to deploy your machines that
| vsphere_csi_aggressive_node_drain | FALSE | boolean | | false | Enable aggressive node drain strategy | | vsphere_csi_aggressive_node_drain | FALSE | boolean | | false | Enable aggressive node drain strategy |
| vsphere_csi_aggressive_node_unreachable_timeout | FALSE | int | 300 | | Timeout till node will be drained when it in an unreachable state | | vsphere_csi_aggressive_node_unreachable_timeout | FALSE | int | 300 | | Timeout till node will be drained when it in an unreachable state |
| vsphere_csi_aggressive_node_not_ready_timeout | FALSE | int | 300 | | Timeout till node will be drained when it in not-ready state | | vsphere_csi_aggressive_node_not_ready_timeout | FALSE | int | 300 | | Timeout till node will be drained when it in not-ready state |
| vsphere_csi_namespace | TRUE | string | | "kube-system" | vSphere CSI namespace to use; kube-system is kept for backward compatibility and should be changed to vmware-system-csi in the long run |
## Usage example ## Usage example

View file

@ -35,6 +35,11 @@ loadbalancer_apiserver_healthcheck_port: 8081
### OTHER OPTIONAL VARIABLES ### OTHER OPTIONAL VARIABLES
## By default, Kubespray collects nameservers on the host and adds them to nameserverentries.
## If true, Kubespray does not include the host nameservers in nameserverentries during the dns_late stage. However, it still uses them during the dns_early stage to make sure the cluster installs safely.
## Use this option with caution; you may need to define your own DNS servers, otherwise outbound queries such as www.google.com may fail.
# disable_host_nameservers: false
## Upstream dns servers ## Upstream dns servers
# upstream_dns_servers: # upstream_dns_servers:
# - 8.8.8.8 # - 8.8.8.8

View file

@ -82,8 +82,8 @@
# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" # docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" # docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
### Containerd ### Containerd
# containerd_debian_repo_base_url: "{{ ubuntu_repo }}/containerd" # containerd_debian_repo_base_url: "{{ debian_repo }}/containerd"
# containerd_debian_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" # containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg"
# containerd_debian_repo_repokey: 'YOURREPOKEY' # containerd_debian_repo_repokey: 'YOURREPOKEY'
## Ubuntu ## Ubuntu

View file

@ -7,13 +7,18 @@
# upcloud_csi_provisioner_image_tag: "v3.1.0" # upcloud_csi_provisioner_image_tag: "v3.1.0"
# upcloud_csi_attacher_image_tag: "v3.4.0" # upcloud_csi_attacher_image_tag: "v3.4.0"
# upcloud_csi_resizer_image_tag: "v1.4.0" # upcloud_csi_resizer_image_tag: "v1.4.0"
# upcloud_csi_plugin_image_tag: "v0.2.1" # upcloud_csi_plugin_image_tag: "v0.3.3"
# upcloud_csi_node_image_tag: "v2.5.0" # upcloud_csi_node_image_tag: "v2.5.0"
# upcloud_tolerations: [] # upcloud_tolerations: []
## Storage class options ## Storage class options
# expand_persistent_volumes: true
# parameters:
# tier: maxiops # or hdd
# storage_classes: # storage_classes:
# - name: standard # - name: standard
# is_default: true # is_default: true
# expand_persistent_volumes: true
# parameters:
# tier: maxiops
# - name: hdd
# is_default: false
# expand_persistent_volumes: true
# parameters:
# tier: hdd

View file

@ -161,7 +161,7 @@ cert_manager_enabled: false
# MetalLB deployment # MetalLB deployment
metallb_enabled: false metallb_enabled: false
metallb_speaker_enabled: true metallb_speaker_enabled: "{{ metallb_enabled }}"
# metallb_ip_range: # metallb_ip_range:
# - "10.5.0.50-10.5.0.99" # - "10.5.0.50-10.5.0.99"
# metallb_pool_name: "loadbalanced" # metallb_pool_name: "loadbalanced"
@ -210,7 +210,7 @@ metallb_speaker_enabled: true
# my_asn: 4200000000 # my_asn: 4200000000
argocd_enabled: false argocd_enabled: false
# argocd_version: v2.4.7 # argocd_version: v2.4.15
# argocd_namespace: argocd # argocd_namespace: argocd
# Default password: # Default password:
# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli # - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli

View file

@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true kube_api_anonymous_auth: true
## Change this to use another Kubernetes version, e.g. a current beta release ## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.24.4 kube_version: v1.25.3
# Where the binaries will be downloaded. # Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G) # Note: ensure that you've enough disk space (about 1G)
@ -160,6 +160,14 @@ kube_encrypt_secret_data: false
cluster_name: cluster.local cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods # Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2 ndots: 2
# dns_timeout: 2
# dns_attempts: 2
# Custom search domains to be added in addition to the default cluster search domains
# searchdomains:
# - svc.{{ cluster_name }}
# - default.svc.{{ cluster_name }}
# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
# remove_default_searchdomains: false
# Can be coredns, coredns_dual, manual or none # Can be coredns, coredns_dual, manual or none
dns_mode: coredns dns_mode: coredns
# Set manual server if using a custom cluster DNS server # Set manual server if using a custom cluster DNS server
@ -185,6 +193,13 @@ nodelocaldns_secondary_skew_seconds: 5
# nameservers: # nameservers:
# - 192.168.0.53 # - 192.168.0.53
# cache: 0 # cache: 0
# - zones:
# - mydomain.tld
# nameservers:
# - 10.233.0.3
# cache: 5
# rewrite:
# - name website.tld website.namespace.svc.cluster.local
# Enable k8s_external plugin for CoreDNS # Enable k8s_external plugin for CoreDNS
enable_coredns_k8s_external: false enable_coredns_k8s_external: false
coredns_k8s_external_zone: k8s_external.local coredns_k8s_external_zone: k8s_external.local
@ -324,3 +339,9 @@ event_ttl_duration: "1h0m0s"
auto_renew_certificates: false auto_renew_certificates: false
# First Monday of each month # First Monday of each month
# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" # auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00"
# kubeadm patches path
kubeadm_patches:
enabled: false
source_dir: "{{ inventory_dir }}/patches"
dest_dir: "{{ kube_config_dir }}/patches"
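A user-supplied patch dropped into `source_dir` follows the same shape as the kube-scheduler and kube-controller-manager annotation patches added elsewhere in this commit. As a hedged sketch (hypothetical annotation, assuming kubeadm's `target[suffix][+patchtype].yaml` file-naming convention):
```yaml
# {{ inventory_dir }}/patches/kube-apiserver0+strategic.yaml (illustrative)
apiVersion: v1
kind: Pod
metadata:
  annotations:
    example.com/patched-by: kubespray   # hypothetical annotation merged into the static pod
```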

View file

@ -60,7 +60,7 @@ calico_pool_blocksize: 26
# - x.x.x.x/24 # - x.x.x.x/24
# - y.y.y.y/32 # - y.y.y.y/32
# Adveritse Service LoadBalancer IPs # Advertise Service LoadBalancer IPs
# calico_advertise_service_loadbalancer_ips: # calico_advertise_service_loadbalancer_ips:
# - x.x.x.x/24 # - x.x.x.x/24
# - y.y.y.y/16 # - y.y.y.y/16
@ -99,7 +99,7 @@ calico_pool_blocksize: 26
# calico_vxlan_vni: 4096 # calico_vxlan_vni: 4096
# calico_vxlan_port: 4789 # calico_vxlan_port: 4789
# Cenable eBPF mode # Enable eBPF mode
# calico_bpf_enabled: false # calico_bpf_enabled: false
# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: # If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of:
@ -109,6 +109,10 @@ calico_pool_blocksize: 26
# calico_ip_auto_method: "interface=eth.*" # calico_ip_auto_method: "interface=eth.*"
# calico_ip6_auto_method: "interface=eth.*" # calico_ip6_auto_method: "interface=eth.*"
# Set FELIX_MTUIFACEPATTERN, the pattern used to discover the host's interface for MTU auto-detection.
# see https://projectcalico.docs.tigera.io/reference/felix/configuration
# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
# Choose the iptables insert mode for Calico: "Insert" or "Append". # Choose the iptables insert mode for Calico: "Insert" or "Append".
# calico_felix_chaininsertmode: Insert # calico_felix_chaininsertmode: Insert

View file

@ -1,5 +1,5 @@
--- ---
# cilium_version: "v1.11.7" # cilium_version: "v1.12.1"
# Log-level # Log-level
# cilium_debug: false # cilium_debug: false
@ -118,6 +118,7 @@
# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ # https://docs.cilium.io/en/stable/concepts/networking/masquerading/
# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded # By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded
# cilium_ip_masq_agent_enable: false # cilium_ip_masq_agent_enable: false
### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded ### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded
# cilium_non_masquerade_cidrs: # cilium_non_masquerade_cidrs:
# - 10.0.0.0/8 # - 10.0.0.0/8

View file

@ -0,0 +1,8 @@
---
apiVersion: v1
kind: Pod
metadata:
name: kube-controller-manager
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '10257'

View file

@ -0,0 +1,8 @@
---
apiVersion: v1
kind: Pod
metadata:
name: kube-scheduler
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '10259'

View file

@ -7,13 +7,13 @@ addusers:
etcd: etcd:
name: etcd name: etcd
comment: "Etcd user" comment: "Etcd user"
createhome: no create_home: no
system: yes system: yes
shell: /sbin/nologin shell: /sbin/nologin
kube: kube:
name: kube name: kube
comment: "Kubernetes user" comment: "Kubernetes user"
createhome: no create_home: no
system: yes system: yes
shell: /sbin/nologin shell: /sbin/nologin
group: "{{ kube_cert_group }}" group: "{{ kube_cert_group }}"
@ -24,4 +24,4 @@ adduser:
comment: "{{ user.comment|default(None) }}" comment: "{{ user.comment|default(None) }}"
shell: "{{ user.shell|default(None) }}" shell: "{{ user.shell|default(None) }}"
system: "{{ user.system|default(None) }}" system: "{{ user.system|default(None) }}"
createhome: "{{ user.createhome|default(None) }}" create_home: "{{ user.create_home|default(None) }}"

View file

@ -7,7 +7,7 @@
- name: User | Create User - name: User | Create User
user: user:
comment: "{{ user.comment|default(omit) }}" comment: "{{ user.comment|default(omit) }}"
createhome: "{{ user.createhome|default(omit) }}" create_home: "{{ user.create_home|default(omit) }}"
group: "{{ user.group|default(user.name) }}" group: "{{ user.group|default(user.name) }}"
home: "{{ user.home|default(omit) }}" home: "{{ user.home|default(omit) }}"
shell: "{{ user.shell|default(omit) }}" shell: "{{ user.shell|default(omit) }}"

View file

@ -5,4 +5,4 @@ addusers:
shell: /sbin/nologin shell: /sbin/nologin
system: yes system: yes
group: "{{ kube_cert_group }}" group: "{{ kube_cert_group }}"
createhome: no create_home: no

View file

@ -2,14 +2,14 @@
addusers: addusers:
- name: etcd - name: etcd
comment: "Etcd user" comment: "Etcd user"
createhome: yes create_home: yes
home: "{{ etcd_data_dir }}" home: "{{ etcd_data_dir }}"
system: yes system: yes
shell: /sbin/nologin shell: /sbin/nologin
- name: kube - name: kube
comment: "Kubernetes user" comment: "Kubernetes user"
createhome: no create_home: no
system: yes system: yes
shell: /sbin/nologin shell: /sbin/nologin
group: "{{ kube_cert_group }}" group: "{{ kube_cert_group }}"

View file

@ -2,14 +2,14 @@
addusers: addusers:
- name: etcd - name: etcd
comment: "Etcd user" comment: "Etcd user"
createhome: yes create_home: yes
home: "{{ etcd_data_dir }}" home: "{{ etcd_data_dir }}"
system: yes system: yes
shell: /sbin/nologin shell: /sbin/nologin
- name: kube - name: kube
comment: "Kubernetes user" comment: "Kubernetes user"
createhome: no create_home: no
system: yes system: yes
shell: /sbin/nologin shell: /sbin/nologin
group: "{{ kube_cert_group }}" group: "{{ kube_cert_group }}"

View file

@ -84,6 +84,14 @@
or is_fedora_coreos or is_fedora_coreos
or ansible_distribution == "Fedora") or ansible_distribution == "Fedora")
- name: Set os_family fact for Kylin Linux Advanced Server
set_fact:
ansible_os_family: "RedHat"
ansible_distribution_major_version: "8"
when: ansible_distribution == "Kylin Linux Advanced Server"
tags:
- facts
- name: Install ceph-commmon package - name: Install ceph-commmon package
package: package:
name: name:

View file

@ -4,7 +4,7 @@
containerd_package: 'containerd.io' containerd_package: 'containerd.io'
yum_repo_dir: /etc/yum.repos.d yum_repo_dir: /etc/yum.repos.d
# Keep minimal repo information arround for cleanup # Keep minimal repo information around for cleanup
containerd_repo_info: containerd_repo_info:
repos: repos:

View file

@ -2,6 +2,9 @@
containerd_storage_dir: "/var/lib/containerd" containerd_storage_dir: "/var/lib/containerd"
containerd_state_dir: "/run/containerd" containerd_state_dir: "/run/containerd"
containerd_systemd_dir: "/etc/systemd/system/containerd.service.d" containerd_systemd_dir: "/etc/systemd/system/containerd.service.d"
# The default value is not -999 here because containerd's oom_score_adj already
# ends up at -999 even when containerd_oom_score is 0.
# Ref: https://github.com/kubernetes-sigs/kubespray/pull/9275#issuecomment-1246499242
containerd_oom_score: 0 containerd_oom_score: 0
# containerd_default_runtime: "runc" # containerd_default_runtime: "runc"
@ -12,6 +15,7 @@ containerd_runc_runtime:
type: "io.containerd.runc.v2" type: "io.containerd.runc.v2"
engine: "" engine: ""
root: "" root: ""
# base_runtime_spec: cri-base.json # use this to limit the number of file descriptors per container
options: options:
systemdCgroup: "{{ containerd_use_systemd_cgroup | ternary('true', 'false') }}" systemdCgroup: "{{ containerd_use_systemd_cgroup | ternary('true', 'false') }}"
@ -22,6 +26,9 @@ containerd_additional_runtimes: []
# engine: "" # engine: ""
# root: "" # root: ""
containerd_base_runtime_specs:
cri-base.json: "{{ lookup('file', 'cri-base.json') }}"
containerd_grpc_max_recv_message_size: 16777216 containerd_grpc_max_recv_message_size: 16777216
containerd_grpc_max_send_message_size: 16777216 containerd_grpc_max_send_message_size: 16777216
@ -46,3 +53,9 @@ containerd_registry_auth: []
# - registry: 10.0.0.2:5000 # - registry: 10.0.0.2:5000
# username: user # username: user
# password: pass # password: pass
# Configure containerd service
containerd_limit_proc_num: "infinity"
containerd_limit_core: "infinity"
containerd_limit_open_file_num: "infinity"
containerd_limit_mem_lock: "infinity"
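Tying the new options together: a runtime opts into a base runtime spec via `base_runtime_spec`, and the spec itself is shipped through `containerd_base_runtime_specs` (the `cri-base.json` added later in this commit, which caps `RLIMIT_NOFILE`). A hedged group_vars sketch mirroring the defaults above:
```yaml
# group_vars sketch (illustrative); applies the bundled cri-base.json to the runc runtime
containerd_runc_runtime:
  name: runc
  type: "io.containerd.runc.v2"
  engine: ""
  root: ""
  base_runtime_spec: cri-base.json   # rendered to {{ containerd_cfg_dir }}/cri-base.json
  options:
    systemdCgroup: "{{ containerd_use_systemd_cgroup | ternary('true', 'false') }}"
```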

View file

@ -0,0 +1,214 @@
{
"ociVersion": "1.0.2-dev",
"process": {
"user": {
"uid": 0,
"gid": 0
},
"cwd": "/",
"capabilities": {
"bounding": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"effective": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"inheritable": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"permitted": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
]
},
"rlimits": [
{
"type": "RLIMIT_NOFILE",
"hard": 1024,
"soft": 1024
}
],
"noNewPrivileges": true
},
"root": {
"path": "rootfs"
},
"mounts": [
{
"destination": "/proc",
"type": "proc",
"source": "proc",
"options": [
"nosuid",
"noexec",
"nodev"
]
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination": "/dev/pts",
"type": "devpts",
"source": "devpts",
"options": [
"nosuid",
"noexec",
"newinstance",
"ptmxmode=0666",
"mode=0620",
"gid=5"
]
},
{
"destination": "/dev/shm",
"type": "tmpfs",
"source": "shm",
"options": [
"nosuid",
"noexec",
"nodev",
"mode=1777",
"size=65536k"
]
},
{
"destination": "/dev/mqueue",
"type": "mqueue",
"source": "mqueue",
"options": [
"nosuid",
"noexec",
"nodev"
]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": [
"nosuid",
"noexec",
"nodev",
"ro"
]
},
{
"destination": "/run",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
}
],
"linux": {
"resources": {
"devices": [
{
"allow": false,
"access": "rwm"
}
]
},
"cgroupsPath": "/default",
"namespaces": [
{
"type": "pid"
},
{
"type": "ipc"
},
{
"type": "uts"
},
{
"type": "mount"
},
{
"type": "network"
}
],
"maskedPaths": [
"/proc/acpi",
"/proc/asound",
"/proc/kcore",
"/proc/keys",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/sys/firmware",
"/proc/scsi"
],
"readonlyPaths": [
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
}
}

View file

@ -0,0 +1 @@
---

View file

@ -84,6 +84,15 @@
notify: restart containerd notify: restart containerd
when: http_proxy is defined or https_proxy is defined when: http_proxy is defined or https_proxy is defined
- name: containerd | Write base_runtime_specs
copy:
content: "{{ item.value }}"
dest: "{{ containerd_cfg_dir }}/{{ item.key }}"
owner: "root"
mode: 0644
with_dict: "{{ containerd_base_runtime_specs | default({}) }}"
notify: restart containerd
- name: containerd | Copy containerd config file - name: containerd | Copy containerd config file
template: template:
src: config.toml.j2 src: config.toml.j2

View file

@ -22,19 +22,15 @@ oom_score = {{ containerd_oom_score }}
default_runtime_name = "{{ containerd_default_runtime | default('runc') }}" default_runtime_name = "{{ containerd_default_runtime | default('runc') }}"
snapshotter = "{{ containerd_snapshotter | default('overlayfs') }}" snapshotter = "{{ containerd_snapshotter | default('overlayfs') }}"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes] [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ containerd_runc_runtime.name }}] {% for runtime in [containerd_runc_runtime] + containerd_additional_runtimes %}
runtime_type = "{{ containerd_runc_runtime.type }}"
runtime_engine = "{{ containerd_runc_runtime.engine}}"
runtime_root = "{{ containerd_runc_runtime.root }}"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ containerd_runc_runtime.name }}.options]
{% for key, value in containerd_runc_runtime.options.items() %}
{{ key }} = {{ value }}
{% endfor %}
{% for runtime in containerd_additional_runtimes %}
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ runtime.name }}] [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ runtime.name }}]
runtime_type = "{{ runtime.type }}" runtime_type = "{{ runtime.type }}"
runtime_engine = "{{ runtime.engine }}" runtime_engine = "{{ runtime.engine }}"
runtime_root = "{{ runtime.root }}" runtime_root = "{{ runtime.root }}"
{% if runtime.base_runtime_spec is defined %}
base_runtime_spec = "{{ containerd_cfg_dir }}/{{ runtime.base_runtime_spec }}"
{% endif %}
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ runtime.name }}.options] [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ runtime.name }}.options]
{% for key, value in runtime.options.items() %} {% for key, value in runtime.options.items() %}
{{ key }} = {{ value }} {{ key }} = {{ value }}

View file

@ -28,9 +28,10 @@ Restart=always
RestartSec=5 RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead # Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting. # in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity LimitNPROC={{ containerd_limit_proc_num }}
LimitCORE=infinity LimitCORE={{ containerd_limit_core }}
LimitNOFILE=infinity LimitNOFILE={{ containerd_limit_open_file_num }}
LimitMEMLOCK={{ containerd_limit_mem_lock }}
# Comment TasksMax if your systemd version does not supports it. # Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this version. # Only systemd 226 and above support this version.
TasksMax=infinity TasksMax=infinity

View file

@ -1,7 +1,7 @@
--- ---
crio_cgroup_manager: "{{ kubelet_cgroup_driver | default('systemd') }}" crio_cgroup_manager: "{{ kubelet_cgroup_driver | default('systemd') }}"
crio_conmon: "/usr/bin/conmon" crio_conmon: "{{ bin_dir }}/conmon"
crio_enable_metrics: false crio_enable_metrics: false
crio_log_level: "info" crio_log_level: "info"
crio_metrics_port: "9090" crio_metrics_port: "9090"
@ -37,17 +37,10 @@ crio_stream_port: "10010"
crio_required_version: "{{ kube_version | regex_replace('^v(?P<major>\\d+).(?P<minor>\\d+).(?P<patch>\\d+)$', '\\g<major>.\\g<minor>') }}" crio_required_version: "{{ kube_version | regex_replace('^v(?P<major>\\d+).(?P<minor>\\d+).(?P<patch>\\d+)$', '\\g<major>.\\g<minor>') }}"
crio_kubernetes_version_matrix:
"1.24": "1.24"
"1.23": "1.23"
"1.22": "1.22"
crio_version: "{{ crio_kubernetes_version_matrix[crio_required_version] | default('1.24') }}"
# The crio_runtimes variable defines a list of OCI compatible runtimes. # The crio_runtimes variable defines a list of OCI compatible runtimes.
crio_runtimes: crio_runtimes:
- name: runc - name: runc
path: /usr/bin/runc path: "{{ bin_dir }}/runc"
type: oci type: oci
root: /run/runc root: /run/runc
@ -65,7 +58,7 @@ kata_runtimes:
# crun is a fast and low-memory footprint OCI Container Runtime fully written in C. # crun is a fast and low-memory footprint OCI Container Runtime fully written in C.
crun_runtime: crun_runtime:
name: crun name: crun
path: /usr/bin/crun path: "{{ bin_dir }}/crun"
type: oci type: oci
root: /run/crun root: /run/crun
@ -76,20 +69,10 @@ youki_runtime:
type: oci type: oci
root: /run/youki root: /run/youki
# When this is true, CRI-O package repositories are added. Set this to false when using an # TODO(cristicalin): remove this after 2.21
# environment with preconfigured CRI-O package repositories.
crio_add_repos: true
# Allow crio offline installation
crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
# Allow crio offline installation
crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"
# skopeo need for save/load images when download_run_once=true
skopeo_packages:
- "skopeo"
# Configure the cri-o pids limit, increase this for heavily multi-threaded workloads # Configure the cri-o pids limit, increase this for heavily multi-threaded workloads
# see https://github.com/cri-o/cri-o/issues/1921 # see https://github.com/cri-o/cri-o/issues/1921
crio_pids_limit: 1024 crio_pids_limit: 1024
@ -102,3 +85,19 @@ crio_subuid_start: 2130706432
crio_subuid_length: 16777216 crio_subuid_length: 16777216
crio_subgid_start: 2130706432 crio_subgid_start: 2130706432
crio_subgid_length: 16777216 crio_subgid_length: 16777216
# cri-o binary files
crio_bin_files:
- conmon
- crio
- crio-status
- pinns
# cri-o manual files
crio_man_files:
5:
- crio.conf
- crio.conf.d
8:
- crio
- crio-status

View file

@ -1,3 +1,5 @@
--- ---
dependencies: dependencies:
- role: container-engine/crictl - role: container-engine/crictl
- role: container-engine/runc
- role: container-engine/skopeo

View file

@ -0,0 +1,17 @@
{
"cniVersion": "0.2.0",
"name": "mynet",
"type": "bridge",
"bridge": "cni0",
"isGateway": true,
"ipMasq": true,
"ipam": {
"type": "host-local",
"subnet": "172.19.0.0/24",
"routes": [
{
"dst": "0.0.0.0/0"
}
]
}
}

View file

@ -0,0 +1,10 @@
{
"metadata": {
"name": "runc1"
},
"image": {
"image": "quay.io/kubespray/hello-world:latest"
},
"log_path": "runc1.0.log",
"linux": {}
}

View file

@ -0,0 +1,10 @@
{
"metadata": {
"name": "runc1",
"namespace": "default",
"attempt": 1,
"uid": "hdishd83djaidwnduwk28bcsb"
},
"linux": {},
"log_directory": "/tmp"
}

View file

@ -7,24 +7,38 @@ lint: |
set -e set -e
yamllint -c ../../../.yamllint . yamllint -c ../../../.yamllint .
platforms: platforms:
- name: ubuntu2004 - name: ubuntu20
box: generic/ubuntu2004 box: generic/ubuntu2004
cpus: 2 cpus: 2
memory: 1024 memory: 1024
groups: groups:
- kube_control_plane - kube_control_plane
- kube_node
- k8s_cluster
- name: almalinux8 - name: almalinux8
box: almalinux/8 box: almalinux/8
cpus: 2 cpus: 2
memory: 1024 memory: 1024
groups: groups:
- kube_control_plane - kube_control_plane
- kube_node
- k8s_cluster
- name: fedora - name: fedora
box: fedora/35-cloud-base box: fedora/36-cloud-base
cpus: 2 cpus: 2
memory: 1024 memory: 1024
groups: groups:
- kube_control_plane - kube_control_plane
- kube_node
- k8s_cluster
- name: debian10
box: generic/debian10
cpus: 2
memory: 1024
groups:
- kube_control_plane
- kube_node
- k8s_cluster
provisioner: provisioner:
name: ansible name: ansible
env: env:

View file

@ -2,5 +2,51 @@
- name: Prepare - name: Prepare
hosts: all hosts: all
gather_facts: False gather_facts: False
become: true
vars:
ignore_assert_errors: true
roles: roles:
- role: kubespray-defaults
- role: bootstrap-os - role: bootstrap-os
- role: kubernetes/preinstall
- role: adduser
user: "{{ addusers.kube }}"
tasks:
- include_tasks: "../../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.cni) }}"
- name: Prepare CNI
hosts: all
gather_facts: False
become: true
vars:
ignore_assert_errors: true
kube_network_plugin: cni
roles:
- role: kubespray-defaults
- role: network_plugin/cni
tasks:
- name: Copy test container files
copy:
src: "{{ item }}"
dest: "/tmp/{{ item }}"
owner: root
mode: 0644
with_items:
- container.json
- sandbox.json
- name: Create /etc/cni/net.d directory
file:
path: /etc/cni/net.d
state: directory
owner: "{{ kube_owner }}"
mode: 0755
- name: Setup CNI
copy:
src: "{{ item }}"
dest: "/etc/cni/net.d/{{ item }}"
owner: root
mode: 0644
with_items:
- 10-mynet.conf

View file

@ -19,3 +19,17 @@ def test_run(host):
cmd = host.command(crictl + " --runtime-endpoint " + path + " version") cmd = host.command(crictl + " --runtime-endpoint " + path + " version")
assert cmd.rc == 0 assert cmd.rc == 0
assert "RuntimeName: cri-o" in cmd.stdout assert "RuntimeName: cri-o" in cmd.stdout
def test_run_pod(host):
runtime = "runc"
run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime)
with host.sudo():
cmd = host.command(run_command)
assert cmd.rc == 0
with host.sudo():
log_f = host.file("/tmp/runc1.0.log")
assert log_f.exists
assert b"Hello from Docker" in log_f.content

View file

@ -0,0 +1,119 @@
---
# TODO(cristicalin): drop this file after 2.21
- name: CRI-O kubic repo name for debian os family
set_fact:
crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x','')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}"
when: ansible_os_family == "Debian"
- name: Remove legacy CRI-O kubic apt repo key
apt_key:
url: "https://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/Release.key"
state: absent
when: crio_kubic_debian_repo_name is defined
- name: Remove legacy CRI-O kubic apt repo
apt_repository:
repo: "deb http://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/ /"
state: absent
filename: devel-kubic-libcontainers-stable
when: crio_kubic_debian_repo_name is defined
- name: Remove legacy CRI-O kubic cri-o apt repo
apt_repository:
repo: "deb {{ crio_download_crio }}{{ crio_version }}/{{ crio_kubic_debian_repo_name }}/ /"
state: absent
filename: devel-kubic-libcontainers-stable-cri-o
when: crio_kubic_debian_repo_name is defined
- name: Remove legacy CRI-O kubic yum repo
yum_repository:
name: devel_kubic_libcontainers_stable
description: Stable Releases of Upstream github.com/containers packages (CentOS_$releasever)
baseurl: http://{{ crio_download_base }}/CentOS_{{ ansible_distribution_major_version }}/
state: absent
when:
- ansible_os_family == "RedHat"
- ansible_distribution not in ["Amazon", "Fedora"]
- name: Remove legacy CRI-O kubic yum repo
yum_repository:
name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
description: "CRI-O {{ crio_version }} (CentOS_$releasever)"
baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_{{ ansible_distribution_major_version }}/"
state: absent
when:
- ansible_os_family == "RedHat"
- ansible_distribution not in ["Amazon", "Fedora"]
- name: Remove legacy CRI-O kubic yum repo
yum_repository:
name: devel_kubic_libcontainers_stable
description: Stable Releases of Upstream github.com/containers packages
baseurl: http://{{ crio_download_base }}/Fedora_{{ ansible_distribution_major_version }}/
state: absent
when:
- ansible_distribution in ["Fedora"]
- not is_ostree
- name: Remove legacy CRI-O kubic yum repo
yum_repository:
name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
description: "CRI-O {{ crio_version }}"
baseurl: "{{ crio_download_crio }}{{ crio_version }}/Fedora_{{ ansible_distribution_major_version }}/"
state: absent
when:
- ansible_distribution in ["Fedora"]
- not is_ostree
- name: Remove legacy CRI-O kubic yum repo
yum_repository:
name: devel_kubic_libcontainers_stable
description: Stable Releases of Upstream github.com/containers packages
baseurl: http://{{ crio_download_base }}/CentOS_7/
state: absent
when: ansible_distribution in ["Amazon"]
- name: Remove legacy CRI-O kubic yum repo
yum_repository:
name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
description: "CRI-O {{ crio_version }}"
baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_7/"
state: absent
when: ansible_distribution in ["Amazon"]
- name: Disable modular repos for CRI-O
ini_file:
path: "/etc/yum.repos.d/{{ item.repo }}.repo"
section: "{{ item.section }}"
option: enabled
value: 0
mode: 0644
become: true
when: is_ostree
loop:
- repo: "fedora-updates-modular"
section: "updates-modular"
- repo: "fedora-modular"
section: "fedora-modular"
# Disable any older module version if we enabled them before
- name: Disable CRI-O ex module
command: "rpm-ostree ex module disable cri-o:{{ item }}"
become: true
when:
- is_ostree
- ostree_version is defined and ostree_version.stdout is version('2021.9', '>=')
with_items:
- 1.22
- 1.23
- 1.24
- name: cri-o | remove installed packages
package:
name: "{{ item }}"
state: absent
when: not is_ostree
with_items:
- cri-o
- cri-o-runc
- oci-systemd-hook

View file

@ -1,179 +0,0 @@
---
- block:
- name: Add Debian Backports apt repo
apt_repository:
repo: "deb http://deb.debian.org/debian {{ ansible_distribution_release }}-backports main"
state: present
filename: debian-backports
- name: Set libseccomp2 pin priority to apt_preferences on Debian buster
copy:
content: |
Package: libseccomp2
Pin: release a={{ ansible_distribution_release }}-backports
Pin-Priority: 1001
dest: "/etc/apt/preferences.d/libseccomp2"
owner: "root"
mode: 0644
when:
- ansible_distribution == "Debian"
- ansible_distribution_version == "10"
- name: CRI-O kubic repo name for debian os family
set_fact:
crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x','')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}"
when: ansible_os_family == "Debian"
- name: Add CRI-O kubic apt repo key
apt_key:
url: "https://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/Release.key"
state: present
when: crio_kubic_debian_repo_name is defined
register: apt_key_download
until: apt_key_download is succeeded
retries: 4
delay: "{{ retry_stagger | d(3) }}"
environment: "{{ proxy_env }}"
- name: Add CRI-O kubic apt repo
apt_repository:
repo: "deb http://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/ /"
state: present
filename: devel-kubic-libcontainers-stable
when: crio_kubic_debian_repo_name is defined
- name: Add CRI-O kubic cri-o apt repo
apt_repository:
repo: "deb {{ crio_download_crio }}{{ crio_version }}/{{ crio_kubic_debian_repo_name }}/ /"
state: present
filename: devel-kubic-libcontainers-stable-cri-o
when: crio_kubic_debian_repo_name is defined
- name: Check that amzn2-extras.repo exists
stat:
path: /etc/yum.repos.d/amzn2-extras.repo
register: amzn2_extras_file_stat
when: ansible_distribution in ["Amazon"]
- name: Find docker repo in amzn2-extras.repo file
lineinfile:
dest: /etc/yum.repos.d/amzn2-extras.repo
line: "[amzn2extra-docker]"
check_mode: yes
register: amzn2_extras_docker_repo
when:
- ansible_distribution in ["Amazon"]
- amzn2_extras_file_stat.stat.exists
- name: Remove docker repository
ini_file:
dest: /etc/yum.repos.d/amzn2-extras.repo
section: amzn2extra-docker
option: enabled
value: "0"
backup: yes
mode: 0644
when:
- ansible_distribution in ["Amazon"]
- amzn2_extras_file_stat.stat.exists
- not amzn2_extras_docker_repo.changed
- name: Add container-selinux yum repo
yum_repository:
name: copr:copr.fedorainfracloud.org:lsm5:container-selinux
file: _copr_lsm5-container-selinux.repo
description: Copr repo for container-selinux owned by lsm5
baseurl: https://download.copr.fedorainfracloud.org/results/lsm5/container-selinux/epel-7-$basearch/
gpgcheck: yes
gpgkey: https://download.copr.fedorainfracloud.org/results/lsm5/container-selinux/pubkey.gpg
skip_if_unavailable: yes
enabled: yes
repo_gpgcheck: no
when: ansible_distribution in ["Amazon"]
- name: Add CRI-O kubic yum repo
yum_repository:
name: devel_kubic_libcontainers_stable
description: Stable Releases of Upstream github.com/containers packages (CentOS_$releasever)
baseurl: http://{{ crio_download_base }}/CentOS_{{ ansible_distribution_major_version }}/
gpgcheck: yes
gpgkey: http://{{ crio_download_base }}/CentOS_{{ ansible_distribution_major_version }}/repodata/repomd.xml.key
keepcache: "0"
when:
- ansible_os_family == "RedHat"
- ansible_distribution not in ["Amazon", "Fedora"]
- name: Add CRI-O kubic yum repo
yum_repository:
name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
description: "CRI-O {{ crio_version }} (CentOS_$releasever)"
baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_{{ ansible_distribution_major_version }}/"
gpgcheck: yes
gpgkey: "{{ crio_download_crio }}{{ crio_version }}/CentOS_{{ ansible_distribution_major_version }}/repodata/repomd.xml.key"
when:
- ansible_os_family == "RedHat"
- ansible_distribution not in ["Amazon", "Fedora"]
- name: Add CRI-O kubic yum repo
yum_repository:
name: devel_kubic_libcontainers_stable
description: Stable Releases of Upstream github.com/containers packages
baseurl: http://{{ crio_download_base }}/Fedora_{{ ansible_distribution_major_version }}/
gpgcheck: yes
gpgkey: http://{{ crio_download_base }}/Fedora_{{ ansible_distribution_major_version }}/repodata/repomd.xml.key
keepcache: "0"
when:
- ansible_distribution in ["Fedora"]
- not is_ostree
- name: Add CRI-O kubic yum repo
yum_repository:
name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
description: "CRI-O {{ crio_version }}"
baseurl: "{{ crio_download_crio }}{{ crio_version }}/Fedora_{{ ansible_distribution_major_version }}/"
gpgcheck: yes
gpgkey: "{{ crio_download_crio }}{{ crio_version }}/Fedora_{{ ansible_distribution_major_version }}/repodata/repomd.xml.key"
when:
- ansible_distribution in ["Fedora"]
- not is_ostree
- name: Add CRI-O kubic yum repo
yum_repository:
name: devel_kubic_libcontainers_stable
description: Stable Releases of Upstream github.com/containers packages
baseurl: http://{{ crio_download_base }}/CentOS_7/
gpgcheck: yes
gpgkey: http://{{ crio_download_base }}/CentOS_7/repodata/repomd.xml.key
keepcache: "0"
when: ansible_distribution in ["Amazon"]
- name: Add CRI-O kubic yum repo
yum_repository:
name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
description: "CRI-O {{ crio_version }}"
baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_7/"
gpgcheck: yes
gpgkey: "{{ crio_download_crio }}{{ crio_version }}/CentOS_7/repodata/repomd.xml.key"
when: ansible_distribution in ["Amazon"]
- name: Enable modular repos for CRI-O
ini_file:
path: "/etc/yum.repos.d/{{ item.repo }}.repo"
section: "{{ item.section }}"
option: enabled
value: 1
mode: 0644
become: true
when: is_ostree
loop:
- repo: "fedora-updates-modular"
section: "updates-modular"
- repo: "fedora-modular"
section: "fedora-modular"
- name: Enable CRI-O ex module
command: "rpm-ostree ex module enable cri-o:{{ crio_version }}"
become: true
when:
- is_ostree
- ostree_version is defined and ostree_version.stdout is version('2021.9', '>=')

View file

@ -1,5 +1,5 @@
--- ---
- name: check if fedora coreos - name: cri-o | check if fedora coreos
stat: stat:
path: /run/ostree-booted path: /run/ostree-booted
get_attributes: no get_attributes: no
@ -7,57 +7,48 @@
get_mime: no get_mime: no
register: ostree register: ostree
- name: set is_ostree - name: cri-o | set is_ostree
set_fact: set_fact:
is_ostree: "{{ ostree.stat.exists }}" is_ostree: "{{ ostree.stat.exists }}"
- name: get ostree version - name: cri-o | get ostree version
shell: "set -o pipefail && rpm-ostree --version | awk -F\\' '/Version/{print $2}'" shell: "set -o pipefail && rpm-ostree --version | awk -F\\' '/Version/{print $2}'"
args: args:
executable: /bin/bash executable: /bin/bash
register: ostree_version register: ostree_version
when: is_ostree when: is_ostree
- name: gather os specific variables - name: cri-o | Download cri-o
include_vars: "{{ item }}" include_tasks: "../../../download/tasks/download_file.yml"
with_first_found: vars:
- files: download: "{{ download_defaults | combine(downloads.crio) }}"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}-{{ ansible_architecture }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- defaults.yml
paths:
- ../vars
skip: true
tags:
- facts
- name: import crio repo - name: cri-o | special handling for amazon linux
import_tasks: "crio_repo.yml" import_tasks: "setup-amazon.yaml"
when: crio_add_repos when: ansible_distribution in ["Amazon"]
- name: Build a list of crio runtimes with Katacontainers runtimes - name: cri-o | clean up legacy repos
import_tasks: "cleanup.yaml"
- name: cri-o | build a list of crio runtimes with Katacontainers runtimes
set_fact: set_fact:
crio_runtimes: "{{ crio_runtimes + kata_runtimes }}" crio_runtimes: "{{ crio_runtimes + kata_runtimes }}"
when: when:
- kata_containers_enabled - kata_containers_enabled
- name: Build a list of crio runtimes with crun runtime - name: cri-o | build a list of crio runtimes with crun runtime
set_fact: set_fact:
crio_runtimes: "{{ crio_runtimes + [crun_runtime] }}" crio_runtimes: "{{ crio_runtimes + [crun_runtime] }}"
when: when:
- crun_enabled - crun_enabled
- name: Build a list of crio runtimes with youki runtime - name: cri-o | build a list of crio runtimes with youki runtime
set_fact: set_fact:
crio_runtimes: "{{ crio_runtimes + [youki_runtime] }}" crio_runtimes: "{{ crio_runtimes + [youki_runtime] }}"
when: when:
- youki_enabled - youki_enabled
- name: Make sure needed folders exist in the system - name: cri-o | make sure needed folders exist in the system
with_items: with_items:
- /etc/crio - /etc/crio
- /etc/containers - /etc/containers
@ -67,98 +58,47 @@
state: directory state: directory
mode: 0755 mode: 0755
- name: Install cri-o config - name: cri-o | install cri-o config
template: template:
src: crio.conf.j2 src: crio.conf.j2
dest: /etc/crio/crio.conf dest: /etc/crio/crio.conf
mode: 0644 mode: 0644
register: config_install register: config_install
- name: Install config.json - name: cri-o | install config.json
template: template:
src: config.json.j2 src: config.json.j2
dest: /etc/crio/config.json dest: /etc/crio/config.json
mode: 0644 mode: 0644
register: reg_auth_install register: reg_auth_install
- name: Add skopeo pkg to install - name: cri-o | copy binaries
set_fact: copy:
crio_packages: "{{ crio_packages + skopeo_packages }}" src: "{{ local_release_dir }}/cri-o/bin/{{ item }}"
when: dest: "{{ bin_dir }}/{{ item }}"
- not skip_downloads|default(false) mode: 0755
- download_run_once remote_src: true
- name: Add libseccomp2 package from Debian Backports to install
set_fact:
crio_packages: "{{ crio_debian_buster_backports_packages + crio_packages }}"
when:
- ansible_distribution == "Debian"
- ansible_distribution_version == "10"
- name: Remove dpkg hold
dpkg_selections:
name: "{{ item | split ('=') | first }}"
selection: install
when:
- ansible_pkg_mgr == 'apt'
changed_when: false
with_items: with_items:
- "{{ crio_packages }}" - "{{ crio_bin_files }}"
notify: restart crio
- name: Install cri-o packages - name: cri-o | copy service file
package: copy:
name: "{{ item }}" src: "{{ local_release_dir }}/cri-o/contrib/crio.service"
state: present dest: /etc/systemd/system/crio.service
when: not is_ostree mode: 0755
with_items: "{{ crio_packages }}" remote_src: true
register: package_install notify: restart crio
until: package_install is succeeded
retries: 4
delay: "{{ retry_stagger | d(3) }}"
# This is required to ensure any apt upgrade will not break kubernetes - name: cri-o | copy default policy
- name: Tell Debian hosts not to change the cri-o version with apt upgrade copy:
dpkg_selections: src: "{{ local_release_dir }}/cri-o/contrib/policy.json"
name: "{{ item | split ('=') | first }}" dest: /etc/containers/policy.json
selection: hold mode: 0755
when: remote_src: true
- ansible_pkg_mgr == 'apt' notify: restart crio
changed_when: false
with_items:
- "{{ crio_packages }}"
- name: Check if already installed - name: cri-o | copy mounts.conf
stat:
path: "/bin/crio"
get_attributes: no
get_checksum: no
get_mime: no
register: need_bootstrap_crio
when: is_ostree
- name: Install cri-o packages with ostree
command: "rpm-ostree install {{ crio_packages|join(' ') }}"
when:
- is_ostree
- not need_bootstrap_crio.stat.exists
become: true
- name: Reboot immediately for updated ostree
reboot:
become: true
when:
- is_ostree
- not need_bootstrap_crio.stat.exists
- name: Remove example CNI configs
file:
path: "/etc/cni/net.d/{{ item }}"
state: absent
loop:
- 100-crio-bridge.conf
- 200-loopback.conf
- name: Copy mounts.conf
copy: copy:
src: mounts.conf src: mounts.conf
dest: /etc/containers/mounts.conf dest: /etc/containers/mounts.conf
@ -167,15 +107,28 @@
- ansible_os_family == 'RedHat' - ansible_os_family == 'RedHat'
notify: restart crio notify: restart crio
- name: Create directory for oci hooks - name: cri-o | create directory for oci hooks
file: file:
path: /etc/containers/oci/hooks.d path: /etc/containers/oci/hooks.d
state: directory state: directory
owner: root owner: root
mode: 0755 mode: 0755
- name: cri-o | set overlay driver
ini_file:
dest: /etc/containers/storage.conf
section: storage
option: "{{ item.option }}"
value: "{{ item.value }}"
mode: 0644
with_items:
- option: driver
value: '"overlay"'
- option: graphroot
value: '"/var/lib/containers/storage"'
# metacopy=on is available since 4.19 and was backported to RHEL 4.18 kernel # metacopy=on is available since 4.19 and was backported to RHEL 4.18 kernel
- name: Set metacopy mount options correctly - name: cri-o | set metacopy mount options correctly
ini_file: ini_file:
dest: /etc/containers/storage.conf dest: /etc/containers/storage.conf
section: storage.options.overlay section: storage.options.overlay
@ -183,14 +136,14 @@
value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}' value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}'
mode: 0644 mode: 0644
- name: Create directory registries configs - name: cri-o | create directory registries configs
file: file:
path: /etc/containers/registries.conf.d path: /etc/containers/registries.conf.d
state: directory state: directory
owner: root owner: root
mode: 0755 mode: 0755
- name: Write registries configs - name: cri-o | write registries configs
template: template:
src: registry.conf.j2 src: registry.conf.j2
dest: "/etc/containers/registries.conf.d/10-{{ item.prefix | default(item.location) | regex_replace(':', '_') }}.conf" dest: "/etc/containers/registries.conf.d/10-{{ item.prefix | default(item.location) | regex_replace(':', '_') }}.conf"
@ -198,14 +151,14 @@
loop: "{{ crio_registries }}" loop: "{{ crio_registries }}"
notify: restart crio notify: restart crio
- name: Configure unqualified registry settings - name: cri-o | configure unqualified registry settings
template: template:
src: unqualified.conf.j2 src: unqualified.conf.j2
dest: "/etc/containers/registries.conf.d/01-unqualified.conf" dest: "/etc/containers/registries.conf.d/01-unqualified.conf"
mode: 0644 mode: 0644
notify: restart crio notify: restart crio
- name: Write cri-o proxy drop-in - name: cri-o | write cri-o proxy drop-in
template: template:
src: http-proxy.conf.j2 src: http-proxy.conf.j2
dest: /etc/systemd/system/crio.service.d/http-proxy.conf dest: /etc/systemd/system/crio.service.d/http-proxy.conf
@ -213,7 +166,7 @@
notify: restart crio notify: restart crio
when: http_proxy is defined or https_proxy is defined when: http_proxy is defined or https_proxy is defined
- name: Configure the uid/gid space for user namespaces - name: cri-o | configure the uid/gid space for user namespaces
lineinfile: lineinfile:
path: '{{ item.path }}' path: '{{ item.path }}'
line: '{{ item.entry }}' line: '{{ item.entry }}'
@ -227,7 +180,7 @@
loop_control: loop_control:
label: '{{ item.path }}' label: '{{ item.path }}'
- name: Ensure crio service is started and enabled - name: cri-o | ensure crio service is started and enabled
service: service:
name: crio name: crio
daemon_reload: true daemon_reload: true
@ -235,18 +188,17 @@
state: started state: started
register: service_start register: service_start
- name: Trigger service restart only when needed - name: cri-o | trigger service restart only when needed
service: # noqa 503 service: # noqa 503
name: crio name: crio
state: restarted state: restarted
when: when:
- config_install.changed - config_install.changed
- reg_auth_install.changed - reg_auth_install.changed
- not package_install.changed
- not service_start.changed - not service_start.changed
- name: Verify that crio is running - name: cri-o | verify that crio is running
command: "crio-status info" command: "{{ bin_dir }}/crio-status info"
register: get_crio_info register: get_crio_info
until: get_crio_info is succeeded until: get_crio_info is succeeded
changed_when: false changed_when: false

View file

@ -0,0 +1,38 @@
---
- name: Check that amzn2-extras.repo exists
stat:
path: /etc/yum.repos.d/amzn2-extras.repo
register: amzn2_extras_file_stat
- name: Find docker repo in amzn2-extras.repo file
lineinfile:
dest: /etc/yum.repos.d/amzn2-extras.repo
line: "[amzn2extra-docker]"
check_mode: yes
register: amzn2_extras_docker_repo
when:
- amzn2_extras_file_stat.stat.exists
- name: Remove docker repository
ini_file:
dest: /etc/yum.repos.d/amzn2-extras.repo
section: amzn2extra-docker
option: enabled
value: "0"
backup: yes
mode: 0644
when:
- amzn2_extras_file_stat.stat.exists
- not amzn2_extras_docker_repo.changed
- name: Add container-selinux yum repo
yum_repository:
name: copr:copr.fedorainfracloud.org:lsm5:container-selinux
file: _copr_lsm5-container-selinux.repo
description: Copr repo for container-selinux owned by lsm5
baseurl: https://download.copr.fedorainfracloud.org/results/lsm5/container-selinux/epel-7-$basearch/
gpgcheck: yes
gpgkey: https://download.copr.fedorainfracloud.org/results/lsm5/container-selinux/pubkey.gpg
skip_if_unavailable: yes
enabled: yes
repo_gpgcheck: no

View file

@ -1 +0,0 @@
centos-8.yml

View file

@ -1,15 +0,0 @@
---
crio_storage_driver: "overlay"
crio_versioned_pkg:
"1.24":
- "cri-o-1.24.*"
"1.23":
- "cri-o-1.23.*"
"1.22":
- "cri-o-1.22.*"
default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}"
crio_packages: "{{ centos_crio_packages | default(default_crio_packages) }}"

View file

@ -1,12 +0,0 @@
---
crio_versioned_pkg:
"1.24":
- "cri-o-1.24.*"
"1.23":
- "cri-o-1.23.*"
"1.22":
- "cri-o-1.22.*"
default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}"
crio_packages: "{{ centos_crio_packages | default(default_crio_packages) }}"

View file

@ -1,12 +0,0 @@
---
crio_versioned_pkg:
"1.24":
- "cri-o-1.24.*"
"1.23":
- "cri-o-1.23.*"
"1.22":
- "cri-o-1.22.*"
default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}"
crio_packages: "{{ centos_crio_packages | default(default_crio_packages) }}"

View file

@ -1,6 +0,0 @@
---
crio_packages:
- containers-basic
crio_conmon: /usr/libexec/crio/conmon
crio_seccomp_profile: /usr/share/defaults/crio/seccomp.json

View file

@ -1,25 +0,0 @@
---
crio_versioned_pkg:
"1.24":
- "cri-o=1.24*"
- cri-o-runc
"1.23":
- "cri-o=1.23*"
- cri-o-runc
"1.22":
- "cri-o=1.22*"
- cri-o-runc
crio_debian_buster_backports_packages:
- "libseccomp2"
default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}"
crio_packages: "{{ debian_crio_packages | default(default_crio_packages) }}"
# The crio_runtimes variable defines a list of OCI compatible runtimes.
crio_runtimes:
- name: runc
path: /usr/sbin/runc
type: oci
root: /run/runc
@ -1,5 +0,0 @@
---
crio_packages:
- cri-o
crio_version: 1.24
@ -1,9 +0,0 @@
---
crio_packages:
- cri-o
crio_kubernetes_version_matrix:
"1.24": "1.23"
"1.23": "1.23"
"1.22": "1.22"
crio_version: "{{ crio_kubernetes_version_matrix[crio_required_version] | default('1.23') }}"
@ -1 +0,0 @@
centos-8.yml
@ -1,4 +0,0 @@
---
crio_packages:
- cri-o
- oci-systemd-hook
@ -1 +0,0 @@
centos-8.yml
@ -1,22 +0,0 @@
---
crio_versioned_pkg:
"1.24":
- "cri-o=1.24*"
- cri-o-runc
"1.23":
- "cri-o=1.23*"
- cri-o-runc
"1.22":
- "cri-o=1.22*"
- cri-o-runc
default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}"
crio_packages: "{{ ubuntu_crio_packages | default(default_crio_packages) }}"
# The crio_runtimes variable defines a list of OCI compatible runtimes.
crio_runtimes:
- name: runc
path: /usr/sbin/runc
type: oci
root: /run/runc
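# Editorial note: the per-distro crio_versioned_pkg / crio_packages vars files deleted
# above appear to be superseded by the tarball-based install introduced in this change;
# cri-o versions are now pinned per Kubernetes minor via crio_supported_versions and
# fetched from crio_download_url (see the download defaults further below).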
@ -18,17 +18,17 @@ docker_versioned_pkg:
'latest': docker-ce 'latest': docker-ce
'18.09': docker-ce=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }} '18.09': docker-ce=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }}
'19.03': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} '19.03': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }}
'20.10': docker-ce=5:20.10.17~3-0~debian-{{ ansible_distribution_release|lower }} '20.10': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
'stable': docker-ce=5:20.10.17~3-0~debian-{{ ansible_distribution_release|lower }} 'stable': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
'edge': docker-ce=5:20.10.17~3-0~debian-{{ ansible_distribution_release|lower }} 'edge': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
docker_cli_versioned_pkg: docker_cli_versioned_pkg:
'latest': docker-ce-cli 'latest': docker-ce-cli
'18.09': docker-ce-cli=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }} '18.09': docker-ce-cli=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }}
'19.03': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }} '19.03': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }}
'20.10': docker-ce-cli=5:20.10.17~3-0~debian-{{ ansible_distribution_release|lower }} '20.10': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
'stable': docker-ce-cli=5:20.10.17~3-0~debian-{{ ansible_distribution_release|lower }} 'stable': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
'edge': docker-ce-cli=5:20.10.17~3-0~debian-{{ ansible_distribution_release|lower }} 'edge': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
docker_package_info: docker_package_info:
pkgs: pkgs:
@ -18,16 +18,16 @@ containerd_versioned_pkg:
docker_versioned_pkg: docker_versioned_pkg:
'latest': docker-ce 'latest': docker-ce
'19.03': docker-ce-19.03.15-3.fc{{ ansible_distribution_major_version }} '19.03': docker-ce-19.03.15-3.fc{{ ansible_distribution_major_version }}
'20.10': docker-ce-20.10.17-3.fc{{ ansible_distribution_major_version }} '20.10': docker-ce-20.10.20-3.fc{{ ansible_distribution_major_version }}
'stable': docker-ce-20.10.17-3.fc{{ ansible_distribution_major_version }} 'stable': docker-ce-20.10.20-3.fc{{ ansible_distribution_major_version }}
'edge': docker-ce-20.10.17-3.fc{{ ansible_distribution_major_version }} 'edge': docker-ce-20.10.20-3.fc{{ ansible_distribution_major_version }}
docker_cli_versioned_pkg: docker_cli_versioned_pkg:
'latest': docker-ce-cli 'latest': docker-ce-cli
'19.03': docker-ce-cli-19.03.15-3.fc{{ ansible_distribution_major_version }} '19.03': docker-ce-cli-19.03.15-3.fc{{ ansible_distribution_major_version }}
'20.10': docker-ce-cli-20.10.17-3.fc{{ ansible_distribution_major_version }} '20.10': docker-ce-cli-20.10.20-3.fc{{ ansible_distribution_major_version }}
'stable': docker-ce-cli-20.10.17-3.fc{{ ansible_distribution_major_version }} 'stable': docker-ce-cli-20.10.20-3.fc{{ ansible_distribution_major_version }}
'edge': docker-ce-cli-20.10.17-3.fc{{ ansible_distribution_major_version }} 'edge': docker-ce-cli-20.10.20-3.fc{{ ansible_distribution_major_version }}
docker_package_info: docker_package_info:
enablerepo: "docker-ce" enablerepo: "docker-ce"
@ -20,17 +20,17 @@ docker_versioned_pkg:
'latest': docker-ce 'latest': docker-ce
'18.09': docker-ce-18.09.9-3.el7 '18.09': docker-ce-18.09.9-3.el7
'19.03': docker-ce-19.03.15-3.el7 '19.03': docker-ce-19.03.15-3.el7
'20.10': docker-ce-20.10.17-3.el7 '20.10': docker-ce-20.10.20-3.el7
'stable': docker-ce-20.10.17-3.el7 'stable': docker-ce-20.10.20-3.el7
'edge': docker-ce-20.10.17-3.el7 'edge': docker-ce-20.10.20-3.el7
docker_cli_versioned_pkg: docker_cli_versioned_pkg:
'latest': docker-ce-cli 'latest': docker-ce-cli
'18.09': docker-ce-cli-18.09.9-3.el7 '18.09': docker-ce-cli-18.09.9-3.el7
'19.03': docker-ce-cli-19.03.15-3.el7 '19.03': docker-ce-cli-19.03.15-3.el7
'20.10': docker-ce-cli-20.10.17-3.el7 '20.10': docker-ce-cli-20.10.20-3.el7
'stable': docker-ce-cli-20.10.17-3.el7 'stable': docker-ce-cli-20.10.20-3.el7
'edge': docker-ce-cli-20.10.17-3.el7 'edge': docker-ce-cli-20.10.20-3.el7
docker_package_info: docker_package_info:
enablerepo: "docker-ce" enablerepo: "docker-ce"
@ -20,17 +20,17 @@ docker_versioned_pkg:
'latest': docker-ce 'latest': docker-ce
'18.09': docker-ce-3:18.09.9-3.el7 '18.09': docker-ce-3:18.09.9-3.el7
'19.03': docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }} '19.03': docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }}
'20.10': docker-ce-3:20.10.17-3.el{{ ansible_distribution_major_version }} '20.10': docker-ce-3:20.10.20-3.el{{ ansible_distribution_major_version }}
'stable': docker-ce-3:20.10.17-3.el{{ ansible_distribution_major_version }} 'stable': docker-ce-3:20.10.20-3.el{{ ansible_distribution_major_version }}
'edge': docker-ce-3:20.10.17-3.el{{ ansible_distribution_major_version }} 'edge': docker-ce-3:20.10.20-3.el{{ ansible_distribution_major_version }}
docker_cli_versioned_pkg: docker_cli_versioned_pkg:
'latest': docker-ce-cli 'latest': docker-ce-cli
'18.09': docker-ce-cli-1:18.09.9-3.el7 '18.09': docker-ce-cli-1:18.09.9-3.el7
'19.03': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }} '19.03': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }}
'20.10': docker-ce-cli-1:20.10.17-3.el{{ ansible_distribution_major_version }} '20.10': docker-ce-cli-1:20.10.20-3.el{{ ansible_distribution_major_version }}
'stable': docker-ce-cli-1:20.10.17-3.el{{ ansible_distribution_major_version }} 'stable': docker-ce-cli-1:20.10.20-3.el{{ ansible_distribution_major_version }}
'edge': docker-ce-cli-1:20.10.17-3.el{{ ansible_distribution_major_version }} 'edge': docker-ce-cli-1:20.10.20-3.el{{ ansible_distribution_major_version }}
docker_package_info: docker_package_info:
enablerepo: "docker-ce" enablerepo: "docker-ce"
@ -18,17 +18,17 @@ docker_versioned_pkg:
'latest': docker-ce 'latest': docker-ce
'18.09': docker-ce=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }} '18.09': docker-ce=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'19.03': docker-ce=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }} '19.03': docker-ce=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'20.10': docker-ce=5:20.10.17~3-0~ubuntu-{{ ansible_distribution_release|lower }} '20.10': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'stable': docker-ce=5:20.10.17~3-0~ubuntu-{{ ansible_distribution_release|lower }} 'stable': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'edge': docker-ce=5:20.10.17~3-0~ubuntu-{{ ansible_distribution_release|lower }} 'edge': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
docker_cli_versioned_pkg: docker_cli_versioned_pkg:
'latest': docker-ce-cli 'latest': docker-ce-cli
'18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }} '18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'19.03': docker-ce-cli=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }} '19.03': docker-ce-cli=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'20.10': docker-ce-cli=5:20.10.17~3-0~ubuntu-{{ ansible_distribution_release|lower }} '20.10': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'stable': docker-ce-cli=5:20.10.17~3-0~ubuntu-{{ ansible_distribution_release|lower }} 'stable': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'edge': docker-ce-cli=5:20.10.17~3-0~ubuntu-{{ ansible_distribution_release|lower }} 'edge': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
docker_package_info: docker_package_info:
pkgs: pkgs:
@ -459,7 +459,7 @@ enable_debug = {{ kata_containers_qemu_debug }}
# #
# If enabled, the default trace mode is "dynamic" and the # If enabled, the default trace mode is "dynamic" and the
# default trace type is "isolated". The trace mode and type are set # default trace type is "isolated". The trace mode and type are set
# explicity with the `trace_type=` and `trace_mode=` options. # explicitly with the `trace_type=` and `trace_mode=` options.
# #
# Notes: # Notes:
# #
@ -1,4 +1,12 @@
--- ---
- name: runc | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
get_checksum: no
get_mime: no
register: ostree
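# /run/ostree-booted exists only on ostree-managed hosts such as Fedora CoreOS,
# so its presence is used as the is_ostree signal in the next task.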
- name: runc | set is_ostree - name: runc | set is_ostree
set_fact: set_fact:
is_ostree: "{{ ostree.stat.exists }}" is_ostree: "{{ ostree.stat.exists }}"
@ -0,0 +1,32 @@
---
- name: skopeo | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
get_checksum: no
get_mime: no
register: ostree
- name: skopeo | set is_ostree
set_fact:
is_ostree: "{{ ostree.stat.exists }}"
- name: skopeo | Uninstall skopeo package managed by package manager
package:
name: skopeo
state: absent
when:
- not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
ignore_errors: true # noqa ignore-errors
- name: skopeo | Download skopeo binary
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.skopeo) }}"
- name: Copy skopeo binary from download dir
copy:
src: "{{ downloads.skopeo.dest }}"
dest: "{{ bin_dir }}/skopeo"
mode: 0755
remote_src: true
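# Editorial summary (intent hedged): any distro-packaged skopeo is removed first
# (skipped on ostree and Flatcar hosts, assumed to be because those images do not use
# a conventional package manager), then the pinned skopeo_version binary is fetched
# through the shared download role and copied into {{ bin_dir }} with mode 0755.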
@ -90,6 +90,7 @@
import_role: import_role:
name: container-engine/containerd name: container-engine/containerd
tasks_from: reset tasks_from: reset
handlers_from: reset
vars: vars:
service_name: containerd.service service_name: containerd.service
when: when:
@ -110,35 +110,45 @@ calico_apiserver_version: "{{ calico_version }}"
typha_enabled: false typha_enabled: false
calico_apiserver_enabled: false calico_apiserver_enabled: false
flannel_version: "v0.18.1" flannel_version: "v0.19.2"
flannel_cni_version: "v1.1.0" flannel_cni_version: "v1.1.0"
cni_version: "v1.1.1" cni_version: "v1.1.1"
weave_version: 2.8.1 weave_version: 2.8.1
pod_infra_version: "3.6" pod_infra_version: "3.7"
cilium_version: "v1.11.7"
cilium_version: "v1.12.1"
cilium_enable_hubble: false
kube_ovn_version: "v1.9.7" kube_ovn_version: "v1.9.7"
kube_ovn_dpdk_version: "19.11-{{ kube_ovn_version }}" kube_ovn_dpdk_version: "19.11-{{ kube_ovn_version }}"
kube_router_version: "v1.5.1" kube_router_version: "v1.5.1"
multus_version: "v3.8-{{ image_arch }}" multus_version: "v3.8-{{ image_arch }}"
helm_version: "v3.9.2" helm_version: "v3.9.4"
nerdctl_version: "0.22.2" nerdctl_version: "0.22.2"
krew_version: "v0.4.3" krew_version: "v0.4.3"
skopeo_version: v1.10.0
# Get kubernetes major version (i.e. 1.17.4 => 1.17) # Get kubernetes major version (i.e. 1.17.4 => 1.17)
kube_major_version: "{{ kube_version | regex_replace('^v([0-9])+\\.([0-9]+)\\.[0-9]+', 'v\\1.\\2') }}" kube_major_version: "{{ kube_version | regex_replace('^v([0-9])+\\.([0-9]+)\\.[0-9]+', 'v\\1.\\2') }}"
etcd_supported_versions: etcd_supported_versions:
v1.25: "v3.5.5"
v1.24: "v3.5.4" v1.24: "v3.5.4"
v1.23: "v3.5.3" v1.23: "v3.5.3"
v1.22: "v3.5.3"
etcd_version: "{{ etcd_supported_versions[kube_major_version] }}" etcd_version: "{{ etcd_supported_versions[kube_major_version] }}"
crictl_supported_versions: crictl_supported_versions:
v1.25: "v1.25.0"
v1.24: "v1.24.0" v1.24: "v1.24.0"
v1.23: "v1.23.0" v1.23: "v1.23.0"
v1.22: "v1.22.0"
crictl_version: "{{ crictl_supported_versions[kube_major_version] }}" crictl_version: "{{ crictl_supported_versions[kube_major_version] }}"
crio_supported_versions:
v1.25: v1.25.1
v1.24: v1.24.3
v1.23: v1.23.2
crio_version: "{{ crio_supported_versions[kube_major_version] }}"
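# Worked example (illustrative, derived from the maps above): with kube_version
# v1.25.3, kube_major_version renders as v1.25, so etcd_version resolves to v3.5.5,
# crictl_version to v1.25.0 and crio_version to v1.25.1.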
# Download URLs # Download URLs
kubelet_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet" kubelet_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"
kubectl_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" kubectl_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
@ -149,6 +159,7 @@ calicoctl_download_url: "https://github.com/projectcalico/calico/releases/downlo
calicoctl_alternate_download_url: "https://github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" calicoctl_alternate_download_url: "https://github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
calico_crds_download_url: "https://github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" calico_crds_download_url: "https://github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz"
crictl_download_url: "https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" crictl_download_url: "https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
crio_download_url: "https://storage.googleapis.com/cri-o/artifacts/cri-o.{{ image_arch }}.{{ crio_version }}.tar.gz"
helm_download_url: "https://get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" helm_download_url: "https://get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"
runc_download_url: "https://github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}" runc_download_url: "https://github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}"
crun_download_url: "https://github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" crun_download_url: "https://github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"
@ -161,34 +172,63 @@ nerdctl_download_url: "https://github.com/containerd/nerdctl/releases/download/v
krew_download_url: "https://github.com/kubernetes-sigs/krew/releases/download/{{ krew_version }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz" krew_download_url: "https://github.com/kubernetes-sigs/krew/releases/download/{{ krew_version }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz"
containerd_download_url: "https://github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" containerd_download_url: "https://github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
cri_dockerd_download_url: "https://github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" cri_dockerd_download_url: "https://github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz"
skopeo_download_url: "https://github.com/lework/skopeo-binary/releases/download/{{ skopeo_version }}/skopeo-linux-{{ image_arch }}"
crictl_checksums: crictl_checksums:
arm: arm:
v1.25.0: c4efe3649af5542f2b07cdfc0be62e9e13c7bb846a9b59d57e190c764f28dae4
v1.24.0: 1ab8a88d6ce1e9cff1c76fc454d2d41cf0c89e98c6db15a41804a3a5874cbf89 v1.24.0: 1ab8a88d6ce1e9cff1c76fc454d2d41cf0c89e98c6db15a41804a3a5874cbf89
v1.23.0: c20f7a118183d1e6da24c3709471ea0b4dee51cb709f958e0d90f3acb4eb59ae v1.23.0: c20f7a118183d1e6da24c3709471ea0b4dee51cb709f958e0d90f3acb4eb59ae
v1.22.0: b74f7cc52ce79c6d7fd776beb6353f4628e9c36f17ba2b8e6c48155714057f07
arm64: arm64:
v1.25.0: 651c939eca010bbf48cc3932516b194028af0893025f9e366127f5b50ad5c4f4
v1.24.0: b6fe172738dfa68ca4c71ade53574e859bf61a3e34d21b305587b1ad4ab28d24 v1.24.0: b6fe172738dfa68ca4c71ade53574e859bf61a3e34d21b305587b1ad4ab28d24
v1.23.0: 91094253e77094435027998a99b9b6a67b0baad3327975365f7715a1a3bd9595 v1.23.0: 91094253e77094435027998a99b9b6a67b0baad3327975365f7715a1a3bd9595
v1.22.0: a713c37fade0d96a989bc15ebe906e08ef5c8fe5e107c2161b0665e9963b770e
amd64: amd64:
v1.25.0: 86ab210c007f521ac4cdcbcf0ae3fb2e10923e65f16de83e0e1db191a07f0235
v1.24.0: 3df4a4306e0554aea4fdc26ecef9eea29a58c8460bebfaca3405799787609880 v1.24.0: 3df4a4306e0554aea4fdc26ecef9eea29a58c8460bebfaca3405799787609880
v1.23.0: b754f83c80acdc75f93aba191ff269da6be45d0fc2d3f4079704e7d1424f1ca8 v1.23.0: b754f83c80acdc75f93aba191ff269da6be45d0fc2d3f4079704e7d1424f1ca8
v1.22.0: 45e0556c42616af60ebe93bf4691056338b3ea0001c0201a6a8ff8b1dbc0652a
ppc64le: ppc64le:
v1.25.0: 1b77d1f198c67b2015104eee6fe7690465b8efa4675ea6b4b958c63d60a487e7
v1.24.0: 586c263678c6d8d543976607ea1732115e622d44993e2bcbed29832370d3a754 v1.24.0: 586c263678c6d8d543976607ea1732115e622d44993e2bcbed29832370d3a754
v1.23.0: 53db9e605a3042ea77bbf42a01a4e248dea8839bcab544c491745874f73aeee7 v1.23.0: 53db9e605a3042ea77bbf42a01a4e248dea8839bcab544c491745874f73aeee7
v1.22.0: c78bcea20c8f8ca3be0762cca7349fd2f1df520c304d0b2ef5e8fa514f64e45f
crio_archive_checksums:
arm:
v1.25.1: 0
v1.24.3: 0
v1.23.2: 0
arm64:
v1.25.1: add26675dc993b292024d007fd69980d8d1e75c675851d0cb687fe1dfd1f3008
v1.24.3: d8040602e03c90e4482b4ce97b63c2cf1301cd2afb0aa722342f40f3537a1a1f
v1.23.2: a866ccc3a062ac29906a619b9045a5e23b11fa9249f8802f8be0849491d01fbd
amd64:
v1.25.1: 49f98a38805740c40266a5bf3badc28e4ca725ccf923327c75c00fccc241f562
v1.24.3: 43f6e3a7ad6ae8cf05ed0f1e493578c28abf6a798aedb8ee9643ff7c25a68ca3
v1.23.2: 5c766dbf366a80f8b5dbc7a06d566f43e7cb0675186c50062df01f3b3cb5e526
ppc64le:
v1.25.1: 0
v1.24.3: 0
v1.23.2: 0
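# Editorial note (assumption): the 0 checksums above look like placeholders for
# architectures without a published cri-o release tarball; treat cri-o binary installs
# on arm and ppc64le as unsupported until real checksums are provided.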
# Checksum # Checksum
# Kubernetes versions above Kubespray's current target version are untested and should be used with caution. # Kubernetes versions above Kubespray's current target version are untested and should be used with caution.
kubelet_checksums: kubelet_checksums:
arm: arm:
v1.25.3: 9745a48340ca61b00f0094e4b8ff210839edcf05420f0d57b3cb1748cb887060
v1.25.2: 995f885543fa61a08bd4f1008ba6d7417a1c45bd2a8e0f70c67a83e53b46eea5
v1.25.1: 6fe430ad91e1ed50cf5cc396aa204fda0889c36b8a3b84619d633cd9a6a146e2
v1.25.0: ad45ac3216aa186648fd034dec30a00c1a2d2d1187cab8aae21aa441a13b4faa
v1.24.7: 3841e80f54ee5576928e799e4962231261bcdafe94868a310a8782da9a321da5
v1.24.6: 084e469d1d3b60363e5e20812ee0d909daa5496f3e6ebd305d1f23d1fe0709d4
v1.24.5: ce55155d1aff0c72effee19c6bef534c2b7d1b23ec701d70335d181bd2d12a87
v1.24.4: f9d387c18159a4473e7bdc290780ba1b1c92e8d8b41f558c15ee044db54636cd v1.24.4: f9d387c18159a4473e7bdc290780ba1b1c92e8d8b41f558c15ee044db54636cd
v1.24.3: fe34b1a0892cdfb015f66be8f2d3450130a5d04f9466732020e186c8da0ee799 v1.24.3: fe34b1a0892cdfb015f66be8f2d3450130a5d04f9466732020e186c8da0ee799
v1.24.2: e484fb000dcfdcf7baca79451745e29764747a27d36f3fc1dda5815b9cbc9b22 v1.24.2: e484fb000dcfdcf7baca79451745e29764747a27d36f3fc1dda5815b9cbc9b22
v1.24.1: 393d130a1715205a253b2f70dbd1f00d1a52ab89b4f3684ed116a937e68116ec v1.24.1: 393d130a1715205a253b2f70dbd1f00d1a52ab89b4f3684ed116a937e68116ec
v1.24.0: fd19ff957c73e5397f9af931c82bdb95791e47dc7d3135d38720ecda211758a3 v1.24.0: fd19ff957c73e5397f9af931c82bdb95791e47dc7d3135d38720ecda211758a3
v1.23.13: 58f744247dbc8bca50b01ec1c25b0b5868736319f9cc8bf964fc2c1dd9eef0f9
v1.23.12: 5b7c38206ba3c04cd756062b74093548ac6309dc086c2893351b1c479f5415a3
v1.23.11: 93bbe3a130dcd7d5732e8b949f13ba8728bb37d3d4bd58408f99352cf484f9d0
v1.23.10: d6d5aa26f16e735962cac5f2ee8ddc0d3b9d2aa14b8e968cb55fc9745f9a8b03 v1.23.10: d6d5aa26f16e735962cac5f2ee8ddc0d3b9d2aa14b8e968cb55fc9745f9a8b03
v1.23.9: f22edc9838eb3d0788d951c1fc8fdb0e1bf6c43ad638a215172f25b54ca27a8a v1.23.9: f22edc9838eb3d0788d951c1fc8fdb0e1bf6c43ad638a215172f25b54ca27a8a
v1.23.8: 53c4f44ba10d9c53a4526fccb4d20146e52473788058684ca2de74ae0e1abb11 v1.23.8: 53c4f44ba10d9c53a4526fccb4d20146e52473788058684ca2de74ae0e1abb11
@ -200,26 +240,22 @@ kubelet_checksums:
v1.23.2: f9e83b3bd99b9e70cd98a5f8dc75a89d3d51548d51e4e05615cdc48d6144f908 v1.23.2: f9e83b3bd99b9e70cd98a5f8dc75a89d3d51548d51e4e05615cdc48d6144f908
v1.23.1: 29868f172ef171ae990deafcdc13af7fe5b00f0a546ae81c267c4ad01231c3ce v1.23.1: 29868f172ef171ae990deafcdc13af7fe5b00f0a546ae81c267c4ad01231c3ce
v1.23.0: 7417fc7cd624a85887f0a28054f58f7534143579fe85285d0b68c8984c95f2ba v1.23.0: 7417fc7cd624a85887f0a28054f58f7534143579fe85285d0b68c8984c95f2ba
v1.22.13: c2230f8ff03102502b6f9f10dcc494af6c536fd8f1f9467aa42ba684da4e9106
v1.22.12: bb50b896769cb5e53101ef36e580095b8e546ea0dc194687e662824248b183ac
v1.22.11: 528e01a436b1b91edaa192ecc6befff5f5a2e17f9f340e3f4908b8bed1cebbe9
v1.22.10: 1510b508bd72c03f2576f07e652dfc0a12feda5a231a7dd792f32cd968153d8f
v1.22.9: 99eb1607e30d855b14da6f4f21d00d09dc6477c3e3bc1e88d00dea7961f3a488
v1.22.8: 7bc14bfca0efb5af6d7e56218f5c51862596cd9927843f8456a36e70e8e64da8
v1.22.7: 3709a794b33081b3f5f5ff1c6f9ab1614c3723d1da0a31c74c37ccdec456e94f
v1.22.6: 9957157375a343664db35be75281d610df85e1986a98cc3db1398bd0e53c36f4
v1.22.5: d901629aa537a0bff0907557810adb3cdc4a31f58035c57a45be011d836e2c8f
v1.22.4: 725a89d9752bbce91443b20108f784ae5efc950db26abb618eb4d0a2364b2ada
v1.22.3: 1c08dbe0b90d5b03fa386fadf5fa1af4db8e41bf8fa77888d54119ff188d130d
v1.22.2: 941e639b0f859eba65df0c66be82808ea6be697ed5dbf4df8e602dcbfa683aa3
v1.22.1: f42bc00f274be7ce0578b359cbccc48ead03894b599f5bf4d10e44c305fbab65
v1.22.0: 4354dc8db1d8ca336eb940dd73adcd3cf17cbdefbf11889602420f6ee9c6c4bb
arm64: arm64:
v1.25.3: 929d25fc3f901749b058141a9c624ff379759869e09df49b75657c0be3141091
v1.25.2: c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae
v1.25.1: b6baa99b99ecc1f358660208a9a27b64c65f3314ff95a84c73091b51ac98484b
v1.25.0: 69572a7b3d179d4a479aa2e0f90e2f091d8d84ef33a35422fc89975dc137a590
v1.24.7: d8bd38e595ca061c53d3b7d1daebe5b3cc1ad44c731666bd5e842d336077db4b
v1.24.6: 2a7b8e131d6823462e38bc1514b5dea5dca86254b3a12ed4a0fa653c2e06dd0e
v1.24.5: dd5dcea80828979981654ec0732b197be252a3259a527cbc299d9575bc2de3e8
v1.24.4: 2d9817c1e9e1edd9480aa05862ea6e9655a9512d820b1933175f5d7c8253ca61 v1.24.4: 2d9817c1e9e1edd9480aa05862ea6e9655a9512d820b1933175f5d7c8253ca61
v1.24.3: 6c04ae25ee9b434f40e0d2466eb4ef5604dc43f306ddf1e5f165fc9d3c521e12 v1.24.3: 6c04ae25ee9b434f40e0d2466eb4ef5604dc43f306ddf1e5f165fc9d3c521e12
v1.24.2: 40a8460e104fbf97abee9763f6e1f2143debc46cc6c9a1a18e21c1ff9960d8c0 v1.24.2: 40a8460e104fbf97abee9763f6e1f2143debc46cc6c9a1a18e21c1ff9960d8c0
v1.24.1: c2189c6956afda0f6002839f9f14a9b48c89dcc0228701e84856be36a3aac6bf v1.24.1: c2189c6956afda0f6002839f9f14a9b48c89dcc0228701e84856be36a3aac6bf
v1.24.0: 8f066c9a048dd1704bf22ccf6e994e2fa2ea1175c9768a786f6cb6608765025e v1.24.0: 8f066c9a048dd1704bf22ccf6e994e2fa2ea1175c9768a786f6cb6608765025e
v1.23.13: 4e2297c9893d425bfcd80741b95fb1a5b59b4fd4f4bcf782ccab94760e653cdf
v1.23.12: b802f12c79a9797f83a366c617144d019d2994fc724c75f642a9d031ce6a3488
v1.23.11: ce4f568c3193e8e0895062f783980da89adb6b54a399c797656a3ce172ddb2fc
v1.23.10: 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8 v1.23.10: 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8
v1.23.9: c11b14ab3fa8e567c54e893c5a937f53618b26c9b62416cc8aa7760835f68350 v1.23.9: c11b14ab3fa8e567c54e893c5a937f53618b26c9b62416cc8aa7760835f68350
v1.23.8: 1b4ec707e29e8136e3516a437cb541a79c52c69b1331a7add2b47e7ac7d032e6 v1.23.8: 1b4ec707e29e8136e3516a437cb541a79c52c69b1331a7add2b47e7ac7d032e6
@ -231,26 +267,22 @@ kubelet_checksums:
v1.23.2: 65372ad077a660dfb8a863432c8a22cd0b650122ca98ce2e11f51a536449339f v1.23.2: 65372ad077a660dfb8a863432c8a22cd0b650122ca98ce2e11f51a536449339f
v1.23.1: c24e4ab211507a39141d227595610383f7c5686cae3795b7d75eebbce8606f3d v1.23.1: c24e4ab211507a39141d227595610383f7c5686cae3795b7d75eebbce8606f3d
v1.23.0: a546fb7ccce69c4163e4a0b19a31f30ea039b4e4560c23fd6e3016e2b2dfd0d9 v1.23.0: a546fb7ccce69c4163e4a0b19a31f30ea039b4e4560c23fd6e3016e2b2dfd0d9
v1.22.13: f8c1ec9fec6b36646ac05e1e26f0cd3e20395b500eca8ee3baeb3ca59935fdb0
v1.22.12: 0e58133c153be32e8e61004cfdc18f8a02ef465f979c6d5bf3e998fbe3f89fca
v1.22.11: d20398fa95ee724d63c3263af65eeb49e56c963fcace92efed2d2d0f6084c11a
v1.22.10: 2376a7ecc044bc4b5cdae9a0a14d058ae5c1803450f3a8ffdce656785e9e251e
v1.22.9: d7a692ee4f5f5929a15c61947ae2deecb71b0945461f6064ced83d13094028e8
v1.22.8: 604c672908a3b3cbbcf9d109d8d5fef0879992ddcf0d3e0766079d3bb7d0ca3e
v1.22.7: 8291d304c0ba4faec4336336d4cdd5159f5c90652b8b0d6be0cb5ce8f8bf92e3
v1.22.6: fbb823fe82b16c6f37911e907d3e4921f4642d5d48eb60e56aba1d7be0665430
v1.22.5: e68536cff9172d1562edddd7194d20302472a064009bf7c0ed8d79d030cb61aa
v1.22.4: c0049ab240b27a9dd57be2bb98356c62582d975ba2f790a61b34f155b12ab7e6
v1.22.3: d0570f09bd5137ff2f672a0b177a6b78fd294a42db21f094dc02c613436ce8d1
v1.22.2: f5fe3d6f4b2df5a794ebf325dc17fcdfe905a188e25f7c7e47d9cd15f14f8c2d
v1.22.1: d5ffd67d8285fb224a1c49622fd739131f7b941e3d68f233dec96e72c9ebee63
v1.22.0: cea637a7da4f1097b16b0195005351c07032a820a3d64c3ff326b9097cfac930
amd64: amd64:
v1.25.3: d5c89c5e5dae6afa5f06a3e0e653ac3b93fa9a93c775a715531269ec91a54abe
v1.25.2: 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3
v1.25.1: 63e38bcbc4437ce10227695f8722371ec0d178067f1031d09fe1f59b6fcf214a
v1.25.0: 7f9183fce12606818612ce80b6c09757452c4fb50aefea5fc5843951c5020e24
v1.24.7: 4d24c97c924c40971412cc497145ad823e4b7b87ccda97ebced375f7e886e9e2
v1.24.6: f8b606f542327128e404d2e66a72a40dc2ddb4175fb8e93c55effeacea60921b
v1.24.5: 2448debe26e90341b038d7ccfcd55942c76ef3d9db48e42ceae5e8de3fbad631
v1.24.4: 0f34d12aaa1b911adbf75dd63df03d0674dde921fa0571a51acd2b5b576ba0a4 v1.24.4: 0f34d12aaa1b911adbf75dd63df03d0674dde921fa0571a51acd2b5b576ba0a4
v1.24.3: da575ceb7c44fddbe7d2514c16798f39f8c10e54b5dbef3bcee5ac547637db11 v1.24.3: da575ceb7c44fddbe7d2514c16798f39f8c10e54b5dbef3bcee5ac547637db11
v1.24.2: 13da57d32be1debad3d8923e481f30aaa46bca7030b7e748b099d403b30e5343 v1.24.2: 13da57d32be1debad3d8923e481f30aaa46bca7030b7e748b099d403b30e5343
v1.24.1: fc352d5c983b0ccf47acd8816eb826d781f408d27263dd8f761dfb63e69abfde v1.24.1: fc352d5c983b0ccf47acd8816eb826d781f408d27263dd8f761dfb63e69abfde
v1.24.0: 3d98ac8b4fb8dc99f9952226f2565951cc366c442656a889facc5b1b2ec2ba52 v1.24.0: 3d98ac8b4fb8dc99f9952226f2565951cc366c442656a889facc5b1b2ec2ba52
v1.23.13: 4d8f796b82dbe2b89b6d587bfeedf66724526b211c75a53456d4ac4014e3dcca
v1.23.12: 98ffa8a736d3e43debb1aa61ae71dea3671989cde5e9e44c6ee51a3d47c63614
v1.23.11: b0e6d413f9b4cf1007fcb9f0ea6460ed5273a50c945ae475c224036b0ab817f7
v1.23.10: c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b v1.23.10: c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b
v1.23.9: a5975920be1de0768e77ef101e4e42b179406add242c0883a7dc598f2006d387 v1.23.9: a5975920be1de0768e77ef101e4e42b179406add242c0883a7dc598f2006d387
v1.23.8: 1ba15ad4d9d99cfc3cbef922b5101492ad74e812629837ac2e5705a68cb7af1e v1.23.8: 1ba15ad4d9d99cfc3cbef922b5101492ad74e812629837ac2e5705a68cb7af1e
@ -262,26 +294,22 @@ kubelet_checksums:
v1.23.2: c3c4be17910935d234b776288461baf7a9c6a7414d1f1ac2ef8d3a1af4e41ab6 v1.23.2: c3c4be17910935d234b776288461baf7a9c6a7414d1f1ac2ef8d3a1af4e41ab6
v1.23.1: 7ff47abf62096a41005d18c6d482cf73f26b613854173327fa9f2b98720804d4 v1.23.1: 7ff47abf62096a41005d18c6d482cf73f26b613854173327fa9f2b98720804d4
v1.23.0: 4756ff345dd80704b749d87efb8eb294a143a1f4a251ec586197d26ad20ea518 v1.23.0: 4756ff345dd80704b749d87efb8eb294a143a1f4a251ec586197d26ad20ea518
v1.22.13: f55a72f5546ecf463f54e9220a1c38179b94b32ba561dfd6ec1f2fbe8231d640
v1.22.12: d54539bd0fa43b43e9ad2ac4e6644bcb3f1e98b8fc371befba7ac362d93a6b00
v1.22.11: 50fb1ede16c15dfe0bcb9fa98148d969ae8efeb8b599ce5eb5f09ab78345c9d1
v1.22.10: c1aa6e9f59cfc765d33b382f604140699ab97c9c4212a905d5e1bcd7ef9a5c8b
v1.22.9: 61530a9e6a5cb1f971295de860a8ade29db65d0dff50d1ffff3de1155dfd0c02
v1.22.8: 2e6d1774f18c4d4527c3b9197a64ea5705edcf1b547c77b3e683458d771f3ce7
v1.22.7: cfc96b5f781bfbfdcb05115f4e26a5a6afc9d74bb4a5647c057b2c13086fb24d
v1.22.6: 7b009835b0ab74aa16ebf57f5179893035e0cf5994e1bcf9b783275921a0393a
v1.22.5: 2be340f236a25881969eaa7d58b2279a4e31dc393cab289a74c78c0c37ba2154
v1.22.4: 8d014cfe511d8c0a127b4e65ae2a6e60db592f9b1b512bb822490ea35958b10d
v1.22.3: 3f00a5f98cec024abace5bcc3580b80afc78181caf52e100fc800e588774d6eb
v1.22.2: 0fd6572e24e3bebbfd6b2a7cb7adced41dad4a828ef324a83f04b46378a8cb24
v1.22.1: 2079780ad2ff993affc9b8e1a378bf5ee759bf87fdc446e6a892a0bbd7353683
v1.22.0: fec5c596f7f815f17f5d7d955e9707df1ef02a2ca5e788b223651f83376feb7f
ppc64le: ppc64le:
v1.25.3: 447a8b34646936bede22c93ca85f0a98210c9f61d6963a7d71f7f6a5152af1d1
v1.25.2: a45dc00ac3a8074c3e9ec6a45b63c0a654529a657d929f28bd79c550a0d213d7
v1.25.1: c1e3373ac088e934635fb13004a21ada39350033bfa0e4b258c114cb86b69138
v1.25.0: 8015f88d1364cf77436c157de8a5d3ab87f1cb2dfaa9289b097c92a808845491
v1.24.7: 621ce04d0cb1c66065303d062bf9ac248225b8428b1adbca3f6fa6dd2eda13cc
v1.24.6: ea9068c28a0107f5e1317ef8ba3a23965d95ee57db6fa71ee27433cdaa0fe33c
v1.24.5: 56844b2594212e81d7cd4470f81da5d0f79876f044ee6d1707166fe76fdcb03a
v1.24.4: 38475815448bd5d43e893b6a9ac9fd3ae8b0dbddf8a7ba92d3f83437b5c1b916 v1.24.4: 38475815448bd5d43e893b6a9ac9fd3ae8b0dbddf8a7ba92d3f83437b5c1b916
v1.24.3: 0bfb73c1932c8593ef6281efc6d16bf440275fed1272466f76101ea0f0971907 v1.24.3: 0bfb73c1932c8593ef6281efc6d16bf440275fed1272466f76101ea0f0971907
v1.24.2: 43e9354dfc46b6d3579a6c9a3e49a2f079fec8e63c3ed998143ab2f05790d132 v1.24.2: 43e9354dfc46b6d3579a6c9a3e49a2f079fec8e63c3ed998143ab2f05790d132
v1.24.1: c59319571efe34ad9bcc4edfe89f5e324d9026d1c3182d86cadc00cfc77f7a06 v1.24.1: c59319571efe34ad9bcc4edfe89f5e324d9026d1c3182d86cadc00cfc77f7a06
v1.24.0: d41d62f6aeff9f8f9b1a1390ed2b17994952966741d6675af8410799bca38931 v1.24.0: d41d62f6aeff9f8f9b1a1390ed2b17994952966741d6675af8410799bca38931
v1.23.13: 444c646dc94dd7f7541a91ddc16a0da7259e345e1f84ec648077f447626844a2
v1.23.12: e14a9dd3e3615e781d1de9000b250267eddfbab5ba46432ad2aa9108a5992e6a
v1.23.11: 64b02bc0f17b9df2b7ca8006d6cb6c1345f32fe6e748fcb6cbe9c4b406b116f6
v1.23.10: a8f742b9b1c0b1a70719da6ea52e92d276b5ad6c59db0070aacdc474292c7e7a v1.23.10: a8f742b9b1c0b1a70719da6ea52e92d276b5ad6c59db0070aacdc474292c7e7a
v1.23.9: 6b05833c938c1d31e7450e93aebff561dfaa43eacafde1a011e0945ec2114fec v1.23.9: 6b05833c938c1d31e7450e93aebff561dfaa43eacafde1a011e0945ec2114fec
v1.23.8: f07b6194add802e2e5c5905a79ef744118ccb82ebcbf4e402a11bdb478de2c0f v1.23.8: f07b6194add802e2e5c5905a79ef744118ccb82ebcbf4e402a11bdb478de2c0f
@ -293,27 +321,23 @@ kubelet_checksums:
v1.23.2: 6fdee30ee13149845aac8d110ad6a1894bb35f953e1ecb562ce7c59f63329dca v1.23.2: 6fdee30ee13149845aac8d110ad6a1894bb35f953e1ecb562ce7c59f63329dca
v1.23.1: 9c3dc8ba6888b610e204d4066f0460d5b24037219300bb5f5b254ea7e8d5a4d1 v1.23.1: 9c3dc8ba6888b610e204d4066f0460d5b24037219300bb5f5b254ea7e8d5a4d1
v1.23.0: 25c841e08ab2655486813287aa97cadf7524277040599e95c32ed9f206308753 v1.23.0: 25c841e08ab2655486813287aa97cadf7524277040599e95c32ed9f206308753
v1.22.13: ac81fe025a69834f872d70d696472780e8e5713e0ca2450dcfc2cd9745b55239
v1.22.12: 50e418ff8b8d1f4746be37d5658895dfcb892b0a3a8a2dd7320e760d4159826c
v1.22.11: 48e6b0e8d4483e7ccce02dd658b4c92be6859bbb235c58e8902182503280a14c
v1.22.10: da53b707bd5e8b4ae9e720a4e87892e4c0713dd419f0d66cade7e4619a3d8965
v1.22.9: fd5be136a69e011ccb9d4482e4c13f23045e3c9c891e7e87394019f003f5cc79
v1.22.8: 804c336a31dfce44330e358d6b30dd0056859c3edc2b9bf34672d327fa8a2e23
v1.22.7: 3bfa04aa3a443aacdc6cf3b0a500317b5efa5cbdf4d9c343026be442120113b4
v1.22.6: 4e64366b96abaf7b45d14c72f6f84fb51c84a66ea0f25e93e50f986e6af7d29e
v1.22.5: 130ad083528ffaca317070828a308a5bb13e79309ec7e82bdf2bfa981a696a73
v1.22.4: 4f71ef4083bff268c4007c2edc59cb89c1deb4e9f6ecd09fbe009644f3c059bd
v1.22.3: 1d8bcb12d956512c2fb215e9161d4b196684a61836ce528e8bbde933ff36a8b5
v1.22.2: 9b4e555110f747569393220ef12a54ae26eb4168eefb77d4b1e6c1d123f71438
v1.22.1: a8c379fce4b1c1bc40238dfea67db286ec8ffec56ed701d581b53a941f7031bb
v1.22.0: 957dcc6ae45078ce971af183c0061d60168c15f484dcd978588cc6380236423f
kubectl_checksums: kubectl_checksums:
arm: arm:
v1.25.3: 59e1dba0951f19d4d18eb04db50fcd437c1d57460f2008bc03e668f71b8ea685
v1.25.2: d6b581a41b010ef86a9364102f8612d2ee7fbc7dd2036e40ab7c85adb52331cb
v1.25.1: e8c6bfd8797e42501d14c7d75201324630f15436f712c4f7e46ce8c8067d9adc
v1.25.0: 0b907cfdcabafae7d2d4ac7de55e3ef814df999acdf6b1bd0ecf6abbef7c7131
v1.24.7: 1829c5bb2ef30df6e46f99aa5c87a0f510a809f9169c725b3da08455bcf7f258
v1.24.6: 7ca8fd7f5d6262668c20e3e639759e1976590ed4bd4fece62861dd376c2168de
v1.24.5: 3ca0fcb90b715f0c13eafe15c9100495a8648d459f1281f3340875d1b0b7e78f
v1.24.4: 060c0bb55aa3284c489cf8224ab10296d486b5a2e7f3e5d6440c9382698bf68a v1.24.4: 060c0bb55aa3284c489cf8224ab10296d486b5a2e7f3e5d6440c9382698bf68a
v1.24.3: 4ae94095580973931da53fd3b823909d85ca05055d6300f392d9dc9e5748d612 v1.24.3: 4ae94095580973931da53fd3b823909d85ca05055d6300f392d9dc9e5748d612
v1.24.2: c342216e1d32c28953e13f28ced387feda675b969a196ed69eaeda137fa7486a v1.24.2: c342216e1d32c28953e13f28ced387feda675b969a196ed69eaeda137fa7486a
v1.24.1: 42e880ff20a55e8ec49187d54e2c1367226d220a0a6a1797e7fbf97426762f4f v1.24.1: 42e880ff20a55e8ec49187d54e2c1367226d220a0a6a1797e7fbf97426762f4f
v1.24.0: 410fc0b3c718f8f431fe4f7d5820bf8133b16ffb76187a53fa90929a77a38cbc v1.24.0: 410fc0b3c718f8f431fe4f7d5820bf8133b16ffb76187a53fa90929a77a38cbc
v1.23.13: c32baf45ad141f967b4877c7151aeee1ae296eebdbcb7a5200d418bd77c284b2
v1.23.12: 94e946dcd1c2f7c8c9e3e022202762a36dab604b861b50bdcbdfb2c719731bd9
v1.23.11: 6eaffb8f64929e888137366cf2aa7fd1df2cf851de4f96f62fe70ed4d79f0ef7
v1.23.10: b2156478b03b90c0f72fd386ceab2e78b7cf32eab9d9b4696c28d2bb45c9d3ec v1.23.10: b2156478b03b90c0f72fd386ceab2e78b7cf32eab9d9b4696c28d2bb45c9d3ec
v1.23.9: 44caabd847c147ded79aa91daa49a5e0ea68ce4a0833b0733df1c8313375ff80 v1.23.9: 44caabd847c147ded79aa91daa49a5e0ea68ce4a0833b0733df1c8313375ff80
v1.23.8: c4a2be3c61f40d4b1b0f61d509b0e361e85f10b7d2a98120d180c023ede7728f v1.23.8: c4a2be3c61f40d4b1b0f61d509b0e361e85f10b7d2a98120d180c023ede7728f
@ -325,26 +349,22 @@ kubectl_checksums:
v1.23.2: 6521719af33342f00ebb6cf020848e25152a63ed5f35a94440c08373b7a36173 v1.23.2: 6521719af33342f00ebb6cf020848e25152a63ed5f35a94440c08373b7a36173
v1.23.1: 52001ed48e9e1c8b8623f3e6b0242111227721e5ddd08fa18046c65c406e35a5 v1.23.1: 52001ed48e9e1c8b8623f3e6b0242111227721e5ddd08fa18046c65c406e35a5
v1.23.0: 6152216d88fa4d32da58c67f78b63b3b99bf4d4d726ffb9fb74ea698dccc8644 v1.23.0: 6152216d88fa4d32da58c67f78b63b3b99bf4d4d726ffb9fb74ea698dccc8644
v1.22.13: 4228743e4e51403692cf9578b35f3550a769804011126a9be18536ac591e8dd2
v1.22.12: 9aa6e8df0dc0c77fd546762ccc78c3f2d349049855c59b0699a3192621590754
v1.22.11: 8e0c2a168aac356b3c84e9366ae19c26fc5ecd1344e3ef92f56377ec4ccddc3b
v1.22.10: daadf5f7c66fdcf2aa62a8504606a058621146379ea1bb52159ea0b087b986b2
v1.22.9: 4b45c5fb69e385f58293c5142d0ee51f79c3e3620a180632bd2370c01d0698e7
v1.22.8: 08ffeb8924c315cd466fc930377ac545edd6ac4ebb8bf284218947256b6729f3
v1.22.7: be9a833a6eae7ee7698ee5cc18bacc2652207af07528e60a78f43a8139fffbfc
v1.22.6: a0dea833198a95ec85b4d55fe7e16333bcdc6a93290238c7473887e7e06f23a7
v1.22.5: 6db514e45f62f611d7e5f862c1eec6009e6de07852cf3cbc37309db1ed76920f
v1.22.4: 9ea171e868aeb64b187a039edd79b2c7ea2bedbd752c76e1c5e44c2486d21f72
v1.22.3: 28e2817751c94940469755911fe3d6a93e288391377f5bb8db08cffa538e72fa
v1.22.2: a16f7d70e65589d2dbd5d4f2115f6ccd4f089fe17a2961c286b809ad94eb052a
v1.22.1: 50991ec4313ee42da03d60e21b90bc15e3252c97db189d1b66aad5bbb555997b
v1.22.0: 6d7c787416a148acffd49746837df4cebb1311c652483dc3d2c8d24ce1cc897e
arm64: arm64:
v1.25.3: cfd5092ce347a69fe49c93681a164d9a8376d69eef587da894207c62ec7d6a5d
v1.25.2: b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5
v1.25.1: 73602eabf20b877f88642fafcbe1eda439162c2c1dbcc9ed09fdd4d7ac9919ea
v1.25.0: 24db547bbae294c5c44f2b4a777e45f0e2f3d6295eace0d0c4be2b2dfa45330d
v1.24.7: 4b138a11b13210ce1731e06918f8fff6709c004c6fb6bec28544713854de9fe8
v1.24.6: 2f62e55960b02bb63cbc9154141520ac7cf0c2d55b45dd4a72867971e24a7219
v1.24.5: a5e348758c0f2b22adeb1b663b4b66781bded895d8ea2a714eb1de81fb00907a
v1.24.4: 0aa4a08ff81efe3fc1a8ef880ca2f8622e3b1f93bf622583d7b9bfe3124afe61 v1.24.4: 0aa4a08ff81efe3fc1a8ef880ca2f8622e3b1f93bf622583d7b9bfe3124afe61
v1.24.3: bdad4d3063ddb7bfa5ecf17fb8b029d5d81d7d4ea1650e4369aafa13ed97149a v1.24.3: bdad4d3063ddb7bfa5ecf17fb8b029d5d81d7d4ea1650e4369aafa13ed97149a
v1.24.2: 5a4c3652f08b4d095b686e1323ac246edbd8b6e5edd5a2626fb71afbcd89bc79 v1.24.2: 5a4c3652f08b4d095b686e1323ac246edbd8b6e5edd5a2626fb71afbcd89bc79
v1.24.1: b817b54183e089494f8b925096e9b65af3a356d87f94b73929bf5a6028a06271 v1.24.1: b817b54183e089494f8b925096e9b65af3a356d87f94b73929bf5a6028a06271
v1.24.0: 449278789de283648e4076ade46816da249714f96e71567e035e9d17e1fff06d v1.24.0: 449278789de283648e4076ade46816da249714f96e71567e035e9d17e1fff06d
v1.23.13: 950626ae35fca6c26096f97cac839d76e2f29616048ad30cec68f1ff003840f2
v1.23.12: 88ebbc41252b39d49ce574a5a2bb25943bb82e55a252c27fe4fc096ce2dbb437
v1.23.11: 9416cc7abaf03eb83f854a45a41986bf4e1232d129d7caafc3101a01ca11b0e3
v1.23.10: d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b v1.23.10: d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b
v1.23.9: 66659f614d06d0fe80c5eafdba7073940906de98ea5ee2a081d84fa37d8c5a21 v1.23.9: 66659f614d06d0fe80c5eafdba7073940906de98ea5ee2a081d84fa37d8c5a21
v1.23.8: b293fce0b3dec37d3f5b8875b8fddc64e02f0f54f54dd7742368973c52530890 v1.23.8: b293fce0b3dec37d3f5b8875b8fddc64e02f0f54f54dd7742368973c52530890
@ -356,26 +376,22 @@ kubectl_checksums:
v1.23.2: 6e7bb8ddc5fc8fa89a4c31aba02942718b092a5107585bd09a83c95039c7510b v1.23.2: 6e7bb8ddc5fc8fa89a4c31aba02942718b092a5107585bd09a83c95039c7510b
v1.23.1: c0c24c7f6a974390e15148a575c84878e925f32328ff96ae173ec762678e4524 v1.23.1: c0c24c7f6a974390e15148a575c84878e925f32328ff96ae173ec762678e4524
v1.23.0: 1d77d6027fc8dfed772609ad9bd68f611b7e4ce73afa949f27084ad3a92b15fe v1.23.0: 1d77d6027fc8dfed772609ad9bd68f611b7e4ce73afa949f27084ad3a92b15fe
v1.22.13: e3e845bac0e1c30de20438433a8d75c64c237892245887a2818bd877b9601b41
v1.22.12: 7d6507ecb8061f7d94d1bd6b982c56b1a1f929427bcc27a962fe66c61100f12a
v1.22.11: 35da77af0581740aa8815c461ee912181fbb4cec09c2e0c9f6dbee58a48758a6
v1.22.10: 6ce1a1315225d7d62f7d17083c9f87d4f3f5684c80da108799c99780ad520cb3
v1.22.9: 33724bed4dddf4d8ecd6ae75667552d121e2fb575ff2db427ce66516e048edac
v1.22.8: 48105735b74e941a84dec6bd53637c023ad53dc5fadd9bf616347cb339c76b47
v1.22.7: 44342131947bc61e6b03103e7e1302d16fa3e5b2e2cd67e27194f66223ecf798
v1.22.6: b43199fe66a58f292f2c685b922330819190eb22ac41cc5c10c33fdf9f2bbc29
v1.22.5: a122ef299d75c0bec1dc1e28670d358e13743144e68223c8178462ba5c436e1d
v1.22.4: 3fcec0284c0fdfc22e89a5b73ebd7f51120cc3505a11a4f6d6f819d46a40b26a
v1.22.3: ebeac516cc073cfe9550f114ca326f762d958cb91a33c8c9d03ede6ba94a6088
v1.22.2: c5bcc7e5321d34ac42c4635ad4f6fe8bd4698e9c879dc3367be542a0b301297b
v1.22.1: 5c7ef1e505c35a8dc0b708f6b6ecdad6723875bb85554e9f9c3fe591e030ae5c
v1.22.0: 8d9cc92dcc942f5ea2b2fc93c4934875d9e0e8ddecbde24c7d4c4e092cfc7afc
amd64: amd64:
v1.25.3: f57e568495c377407485d3eadc27cda25310694ef4ffc480eeea81dea2b60624
v1.25.2: 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb
v1.25.1: 9cc2d6ce59740b6acf6d5d4a04d4a7d839b0a81373248ef0ce6c8d707143435b
v1.25.0: e23cc7092218c95c22d8ee36fb9499194a36ac5b5349ca476886b7edc0203885
v1.24.7: 2d88e56d668b1d7575b4783f22d512e94da432f42467c3aeac8a300b6345f12d
v1.24.6: 3ba7e61aecb19eadfa5de1c648af1bc66f5980526645d9dfe682d77fc313b74c
v1.24.5: 3037f2ec62956e7146fc86defb052d8d3b28e2daa199d7e3ff06d1e06a6286ed
v1.24.4: 4a76c70217581ba327f0ad0a0a597c1a02c62222bb80fbfea4f2f5cb63f3e2d8 v1.24.4: 4a76c70217581ba327f0ad0a0a597c1a02c62222bb80fbfea4f2f5cb63f3e2d8
v1.24.3: 8a45348bdaf81d46caf1706c8bf95b3f431150554f47d444ffde89e8cdd712c1 v1.24.3: 8a45348bdaf81d46caf1706c8bf95b3f431150554f47d444ffde89e8cdd712c1
v1.24.2: f15fb430afd79f79ef7cf94a4e402cd212f02d8ec5a5e6a7ba9c3d5a2f954542 v1.24.2: f15fb430afd79f79ef7cf94a4e402cd212f02d8ec5a5e6a7ba9c3d5a2f954542
v1.24.1: 0ec3c2dbafc6dd27fc8ad25fa27fc527b5d7356d1830c0efbb8adcf975d9e84a v1.24.1: 0ec3c2dbafc6dd27fc8ad25fa27fc527b5d7356d1830c0efbb8adcf975d9e84a
v1.24.0: 94d686bb6772f6fb59e3a32beff908ab406b79acdfb2427abdc4ac3ce1bb98d7 v1.24.0: 94d686bb6772f6fb59e3a32beff908ab406b79acdfb2427abdc4ac3ce1bb98d7
v1.23.13: fae6957e6a7047ad49cdd20976cd2ce9188b502c831fbf61f36618ea1188ba38
v1.23.12: b150c7c4830cc3be4bedd8998bf36a92975c95cd1967b4ef2d1edda080ffe5d9
v1.23.11: cf04ad2fa1cf118a951d690af0afbbe8f5fc4f02c721c848080d466e6159111e
v1.23.10: 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7 v1.23.10: 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7
v1.23.9: 053561f7c68c5a037a69c52234e3cf1f91798854527692acd67091d594b616ce v1.23.9: 053561f7c68c5a037a69c52234e3cf1f91798854527692acd67091d594b616ce
v1.23.8: 299803a347e2e50def7740c477f0dedc69fc9e18b26b2f10e9ff84a411edb894 v1.23.8: 299803a347e2e50def7740c477f0dedc69fc9e18b26b2f10e9ff84a411edb894
@ -387,26 +403,22 @@ kubectl_checksums:
v1.23.2: 5b55b58205acbafa7f4e3fc69d9ce5a9257be63455db318e24db4ab5d651cbde v1.23.2: 5b55b58205acbafa7f4e3fc69d9ce5a9257be63455db318e24db4ab5d651cbde
v1.23.1: 156fd5e7ebbedf3c482fd274089ad75a448b04cf42bc53f370e4e4ea628f705e v1.23.1: 156fd5e7ebbedf3c482fd274089ad75a448b04cf42bc53f370e4e4ea628f705e
v1.23.0: 2d0f5ba6faa787878b642c151ccb2c3390ce4c1e6c8e2b59568b3869ba407c4f v1.23.0: 2d0f5ba6faa787878b642c151ccb2c3390ce4c1e6c8e2b59568b3869ba407c4f
v1.22.13: b96d2bc9137ec63546a29513c40c5d4f74e9f89aa11edc15e3c2f674d5fa3e02
v1.22.12: 8e36c8fa431e454e3368c6174ce3111b7f49c28feebdae6801ab3ca45f02d352
v1.22.11: a61c697e3c9871da7b609511248e41d9c9fb6d9e50001425876676924761586b
v1.22.10: 225bc8d4ac86e3a9e36b85d2d9cb90cd4b4afade29ba0292f47834ecf570abf2
v1.22.9: ae6a9b585f9a366d24bb71f508bfb9e2bb90822136138109d3a91cd28e6563bb
v1.22.8: 761bf1f648056eeef753f84c8365afe4305795c5f605cd9be6a715483fe7ca6b
v1.22.7: 4dd14c5b61f112b73a5c9c844011a7887c4ffd6b91167ca76b67197dee54d388
v1.22.6: 1ab07643807a45e2917072f7ba5f11140b40f19675981b199b810552d6af5c53
v1.22.5: fcb54488199c5340ff1bc0e8641d0adacb27bb18d87d0899a45ddbcc45468611
v1.22.4: 21f24aa723002353eba1cc2668d0be22651f9063f444fd01626dce2b6e1c568c
v1.22.3: 0751808ca8d7daba56bf76b08848ef5df6b887e9d7e8a9030dd3711080e37b54
v1.22.2: aeca0018958c1cae0bf2f36f566315e52f87bdab38b440df349cd091e9f13f36
v1.22.1: 78178a8337fc6c76780f60541fca7199f0f1a2e9c41806bded280a4a5ef665c9
v1.22.0: 703e70d49b82271535bc66bc7bd469a58c11d47f188889bd37101c9772f14fa1
ppc64le: ppc64le:
v1.25.3: bd59ac682fffa37806f768328fee3cb791772c4a12bcb155cc64b5c81b6c47ce
v1.25.2: 1e3665de15a591d52943e6417f3102b5d413bc1d86009801ad0def04e8c920c5
v1.25.1: 957170066abc4d4c178ac8d84263a191d351e98978b86b0916c1b8c061da8282
v1.25.0: dffe15c626d7921d77e85f390b15f13ebc3a9699785f6b210cd13fa6f4653513
v1.24.7: a68ec0c8ed579324037fc0a3bafa9d10184e6ff3ca34bfffdcb78f9f02bcb765
v1.24.6: 448009693a97428aec7e60cc117079724f890e3a46d0aa54accdb56f33ca0f3d
v1.24.5: 0861df1c77336fbe569887a884d62a24fcb6486d43798a8767dba7e5865c3c98
v1.24.4: cfd7151471dd9878d48ab8d7bc3cf945c207e130568ee778f1aed9ceb84afd44 v1.24.4: cfd7151471dd9878d48ab8d7bc3cf945c207e130568ee778f1aed9ceb84afd44
v1.24.3: 893a83cd636650d1ad50be0e9a2517f2f4434c35646dacd9160b66446aee404e v1.24.3: 893a83cd636650d1ad50be0e9a2517f2f4434c35646dacd9160b66446aee404e
v1.24.2: cacf9b4a539853158b885c39fa714710767aa6c12804fccb7de6b037228b811f v1.24.2: cacf9b4a539853158b885c39fa714710767aa6c12804fccb7de6b037228b811f
v1.24.1: 8812543e6c34101d37ad9d7a7edb91621db0fe992b16bd9beb8e5ddb4c7792c5 v1.24.1: 8812543e6c34101d37ad9d7a7edb91621db0fe992b16bd9beb8e5ddb4c7792c5
v1.24.0: 153a1ca1593ef4cb56b16922f8e229986a621d396112f0cfad6fa568ad00fa75 v1.24.0: 153a1ca1593ef4cb56b16922f8e229986a621d396112f0cfad6fa568ad00fa75
v1.23.13: 785d620dc77d10ce49218894225e935e55d08bb3842ae75c11cb41a814aca9ea
v1.23.12: f9a8efede8872c23c54c44f09657fa522e99786f3dc73ba7d6d928e9b3c7dc1a
v1.23.11: 52556d4e8ba19e8b0a65e4ac70203922b42b054647ec59a0177a2c4f61b903e7
v1.23.10: fc0867d7412d7698029413a8307d8e74748d47e402c075e8d6cc79ed772fb232 v1.23.10: fc0867d7412d7698029413a8307d8e74748d47e402c075e8d6cc79ed772fb232
v1.23.9: 141532b62ce75860975d5913bfbf784a09b0abc83ca7d31a6b1eddf28866ce67 v1.23.9: 141532b62ce75860975d5913bfbf784a09b0abc83ca7d31a6b1eddf28866ce67
v1.23.8: 599ed10fc7e8fcb5884485cecf690c7645947d1f144b66d717a3f064f11c0b8f v1.23.8: 599ed10fc7e8fcb5884485cecf690c7645947d1f144b66d717a3f064f11c0b8f
@ -418,27 +430,23 @@ kubectl_checksums:
v1.23.2: 97d50dc4ff0a6c70bbfcbd45f6959e6201c6317392b2894008017380669f6015 v1.23.2: 97d50dc4ff0a6c70bbfcbd45f6959e6201c6317392b2894008017380669f6015
v1.23.1: 514e50afdb5b8953adfffe4941e903748348830bdd82805fd4489c3334a02a4a v1.23.1: 514e50afdb5b8953adfffe4941e903748348830bdd82805fd4489c3334a02a4a
v1.23.0: e96f2b16d8a10fe6531dfac9143efa4960432cf2ae8b26ffd174fa00eb28a851 v1.23.0: e96f2b16d8a10fe6531dfac9143efa4960432cf2ae8b26ffd174fa00eb28a851
v1.22.13: fd4a8473a57275579eedd64a5d13aabf801cddef9f4a81f11658c40b19f559da
v1.22.12: 3855d0a2add2a093772cb024b3cf678ddfa840b4a764f925b0c58ff94aaf13ee
v1.22.11: e74b2c62c524b81e22a5e66bf2abe2f036d26bb541663a4383abd6655d365288
v1.22.10: 98226e40cd93c7a23bf3dde675879207d393d886e53d0e3dfdf8a2732307711c
v1.22.9: 4ac554b2eb811c10276761ec185e1dbd96b24df4ed141159960c2325d6451f6e
v1.22.8: 30d5cba5bdee3bb9395a988867a161ff52e7dc01a40cd4fa2a2adb1c08b76227
v1.22.7: b25bcc11619ea61a60a1cfa8bfd4ef15ccb8db008251013b3473cc04082754bc
v1.22.6: d9acb45bcbcead81e8f61572dd800d82e605af2532edb4be1633b732b009d2e2
v1.22.5: a613f330b10b24992780149184ea3257210932ea9f569af323f84e9debfea535
v1.22.4: a89d158be97df1f7b4d56ed28b219c8f09427511283b78917352b9e90b9f37bf
v1.22.3: b55409b40c60fddf24e6e93cbcee2e33c3c5d8f4a6b3f9c8cf4eb1f23119388d
v1.22.2: f8c8c4734846c56a8eae6e5c877c84e38513337ea1ca08d63e019ffe82114342
v1.22.1: 4eced82fb83c405937c35c18de5ac25befa68ca5ab016b3d279011d7f3701eea
v1.22.0: 7ea30171a5db9dfbdc240674f5cde00fb75a8193ef73783950b8d10c810b6a5b
kubeadm_checksums: kubeadm_checksums:
arm: arm:
v1.25.3: 3f357e1e57936ec7812d35681be249b079bbdc1c7f13a75e6159379398e37d5e
v1.25.2: 2f794569c3322bb66309c7f67126b7f88155dfb1f70eea789bec0edf4e10015e
v1.25.1: ecb7a459ca23dfe527f4eedf33fdb0df3d55519481a8be3f04a5c3a4d41fa588
v1.25.0: 67b6b58cb6abd5a4c9024aeaca103f999077ce6ec8e2ca13ced737f5139ad2f0
v1.24.7: c0a9e6c08cad0b727f06bb3b539d55c65ea977be68fe471f6a9f73af3fbcb275
v1.24.6: 760f0fc195f00ca3d1612e0974461ab937c25aa1e7a2f8d2357cd1336b2ecf3a
v1.24.5: 973f1ad7da9216fe3e0319a0c4fcb519a21a773cd39a0a445e689bea3d4a27c7
v1.24.4: e0c1510ab2ed1cd555abad6f226454a3206aaaf20474da7dcf976ddc86a065d4 v1.24.4: e0c1510ab2ed1cd555abad6f226454a3206aaaf20474da7dcf976ddc86a065d4
v1.24.3: dc90c93e2305a7babafc41185a43435a9f3af2ef5d546bbd06e6553898e43d9e v1.24.3: dc90c93e2305a7babafc41185a43435a9f3af2ef5d546bbd06e6553898e43d9e
v1.24.2: d4bead61c1ba03113281ab96b21530b32e96eea24220bd2aebe1abdec739c266 v1.24.2: d4bead61c1ba03113281ab96b21530b32e96eea24220bd2aebe1abdec739c266
v1.24.1: 1c0b22c941badb40f4fb93e619b4a1c5e4bba7c1c7313f7c7e87d77150f35153 v1.24.1: 1c0b22c941badb40f4fb93e619b4a1c5e4bba7c1c7313f7c7e87d77150f35153
v1.24.0: c463bf24981dea705f4ee6e547abd5cc3b3e499843f836aae1a04f5b80abf4c2 v1.24.0: c463bf24981dea705f4ee6e547abd5cc3b3e499843f836aae1a04f5b80abf4c2
v1.23.13: 54d0f4d7a65abf610606b0538005ab5f177566587a81af6b0bc24ded2f8e305c
v1.23.12: 6da38118a7a1570ad76389f0492c11f8ae8e2068395773b89a2b0442d02e604c
v1.23.11: 4ea0f63d245d01eccc5c3f2c849e2c799392d5e37c9bc4c0ec7a06a5d3722622
v1.23.10: e0db03e8c4c06c3c3e5e29558fa316b0b56ac9d2801751c4a36b2e3f84455b1f v1.23.10: e0db03e8c4c06c3c3e5e29558fa316b0b56ac9d2801751c4a36b2e3f84455b1f
v1.23.9: fa265d592d4f85b083919baa80b232deae20acaf2a20095a9c417c4d5324e002 v1.23.9: fa265d592d4f85b083919baa80b232deae20acaf2a20095a9c417c4d5324e002
v1.23.8: 24d159ac19b519453050a977d2f238873c328e3a9dd3dfe524a32f421b64dadb v1.23.8: 24d159ac19b519453050a977d2f238873c328e3a9dd3dfe524a32f421b64dadb
@ -450,26 +458,22 @@ kubeadm_checksums:
v1.23.2: 63a6ca7dca76475ddef84e4ff84ef058ee2003d0e453b85a52729094025d158e v1.23.2: 63a6ca7dca76475ddef84e4ff84ef058ee2003d0e453b85a52729094025d158e
v1.23.1: 77baac1659f7f474ba066ef8ca67a86accc4e40d117e73c6c76a2e62689d8369 v1.23.1: 77baac1659f7f474ba066ef8ca67a86accc4e40d117e73c6c76a2e62689d8369
v1.23.0: b59790cdce297ac0937cc9ce0599979c40bc03601642b467707014686998dbda v1.23.0: b59790cdce297ac0937cc9ce0599979c40bc03601642b467707014686998dbda
v1.22.13: dc8cb74f5f427958eda265c8190c2f12877e71eb4f04269dd85dfa86a8044208
v1.22.12: d2d1f19c74186e9247cea9ff9ba484a658bd4985060979babe5c28389e594d0a
v1.22.11: b2a5a1c827fe18f4589628cdb69e73c1e65011381ec015e1daa7a31198199302
v1.22.10: f1ab42fbadb0a66ba200392ee82c05b65e3d29a3d8f3e030b774cbc48915dedb
v1.22.9: f68ca35fc71691e599d4913de58b6d77abcb2d27c324abc23388b4383b5299ea
v1.22.8: f55fce83ae69b0f660a0fbdd2d05681d2e29a1119d7cce890fe1f50724bdcc60
v1.22.7: 26b3d79d88e81bf354d716fa48210b0358d2f6ca99cba06eb7640ac1e32724b8
v1.22.6: ad23ad06e83f2466f78652221f73fd58d23d6122b3395c24d9a3be779f6afa49
v1.22.5: f0c95c9b86287ec8570388f8fc26ad05ac342f69876a08cb6cb5aa2ffcc1febd
v1.22.4: 5e52ee3c3f0f5bffd9f0d9e7b3e215b5ab239feb425d47d8bd609bd4b1fb1d61
v1.22.3: d3c76311c582e48889bdb3e3ef1948ce0292983a0c13d37c7e8ae5c6024291f5
v1.22.2: 6ccc26494160e19468b0cb55d56b2d5c62d21424fac79cb66402224c2bf73a0d
v1.22.1: cc08281c5261e860df9a0b5040b8aa2e6d202a243daf25556f5f6d3fd8f2e1e9
v1.22.0: 6a002deb0ee191001d5c0e0435e9a995d70aa376d55075c5f61e70ce198433b8
arm64: arm64:
v1.25.3: 61bb61eceff78b44be62a12bce7c62fb232ce1338928e4207deeb144f82f1d06
v1.25.2: 437dc97b0ca25b3fa8d74b39e4059a77397b55c1a6d16bddfd5a889d91490ce0
v1.25.1: f4d57d89c53b7fb3fe347c9272ed40ec55eab120f4f09cd6b684e97cb9cbf1f0
v1.25.0: 07d9c6ffd3676502acd323c0ca92f44328a1f0e89a7d42a664099fd3016cf16b
v1.24.7: ee946d82173b63f69be9075e218250d4ab1deec39d17d600b16b6743e5dca289
v1.24.6: 211b8d1881468bb673b26036dbcfa4b12877587b0a6260ffd55fd87c2aee6e41
v1.24.5: a68c6dd24ef47825bb34a2ad430d76e6b4d3cbe92187363676993d0538013ac2
v1.24.4: 18de228f6087a2e5243bffcd2cc88c40180a4fa83e4de310ad071b4620bdd8b6 v1.24.4: 18de228f6087a2e5243bffcd2cc88c40180a4fa83e4de310ad071b4620bdd8b6
v1.24.3: ea0fb451b69d78e39548698b32fb8623fad61a1a95483fe0add63e3ffb6e31b5 v1.24.3: ea0fb451b69d78e39548698b32fb8623fad61a1a95483fe0add63e3ffb6e31b5
v1.24.2: bd823b934d1445a020f8df5fe544722175024af62adbf6eb27dc7250d5db0548 v1.24.2: bd823b934d1445a020f8df5fe544722175024af62adbf6eb27dc7250d5db0548
v1.24.1: 04f18fe097351cd16dc91cd3bde979201916686c6f4e1b87bae69ab4479fda04 v1.24.1: 04f18fe097351cd16dc91cd3bde979201916686c6f4e1b87bae69ab4479fda04
v1.24.0: 3e0fa21b8ebce04ca919fdfea7cc756e5f645166b95d6e4b5d9912d7721f9004 v1.24.0: 3e0fa21b8ebce04ca919fdfea7cc756e5f645166b95d6e4b5d9912d7721f9004
v1.23.13: 462971d5822c91598754dfaa9c4c8d46a8c74aefef0f4dbbc8be31c4f0d18855
v1.23.12: d05f6765a65f7541d07aad989ee80cd730c395f042afbe0526f667ea1a0b2947
v1.23.11: 329d9aa9461baf4a7b7225e664ec1ecd61512b937e1f160f9a303bc0f0d44bbb
v1.23.10: 42e957eebef78f6462644d9debc096616054ebd2832e95a176c07c28ebed645c v1.23.10: 42e957eebef78f6462644d9debc096616054ebd2832e95a176c07c28ebed645c
v1.23.9: a0a007023db78e5f78d3d4cf3268b83f093201847c1c107ffb3dc695f988c113 v1.23.9: a0a007023db78e5f78d3d4cf3268b83f093201847c1c107ffb3dc695f988c113
v1.23.8: 9b3d8863ea4ab0438881ccfbe285568529462bc77ef4512b515397a002d81b22 v1.23.8: 9b3d8863ea4ab0438881ccfbe285568529462bc77ef4512b515397a002d81b22
@ -481,26 +485,22 @@ kubeadm_checksums:
v1.23.2: a29fcde7f92e1abfe992e99f415d3aee0fa381478b4a3987e333438b5380ddff v1.23.2: a29fcde7f92e1abfe992e99f415d3aee0fa381478b4a3987e333438b5380ddff
v1.23.1: eb865da197f4595dec21e6fb1fa1751ef25ac66b64fa77fd4411bbee33352a40 v1.23.1: eb865da197f4595dec21e6fb1fa1751ef25ac66b64fa77fd4411bbee33352a40
v1.23.0: 989d117128dcaa923b2c7a917a03f4836c1b023fe1ee723541e0e39b068b93a6 v1.23.0: 989d117128dcaa923b2c7a917a03f4836c1b023fe1ee723541e0e39b068b93a6
v1.22.13: 2c42aadc99b46b6b5684acc7dfa630c67cb12c19b17df4cea3d2091ef5753011
v1.22.12: d0469a3008411edb50f6562e00f1df28123cf2dc368f1538f1b41e27b0482b1c
v1.22.11: 15e1cba65f0db4713bf45ee23dbd01dd30048d20ad97ef985d6b9197f8ae359a
v1.22.10: 8ea22a05b428de70a430711e8f75553e1be2925977ab773b5be1c240bc5b9fcd
v1.22.9: 0168c60d1997435b006b17c95a1d42e55743048cc50ee16c8774498aa203a202
v1.22.8: 67f09853d10434347eb75dbb9c63d57011ba3e4f7e1b320a0c30612b8185be8c
v1.22.7: 2ae0287769a70f442757e49af0ecd9ca2c6e5748e8ba72cb822d669a7aeeb8fa
v1.22.6: bc10e4fb42a182515f4232205bea53f90270b8f80ec1a6c1cc3301bff05e86b7
v1.22.5: 47aa54533289277ac13419c16ffd1a2c35c7af2d6a571261e3d728990bc5fc7d
v1.22.4: 3dfb128e108a3f07c53cae777026f529784a057628c721062d8fdd94b6870b69
v1.22.3: dcd1ecfb7f51fb3929b9c63a984b00cf6baa6136e1d58f943ee2c9a47af5875d
v1.22.2: 77b4c6a56ae0ec142f54a6f5044a7167cdd7193612b04b77bf433ffe1d1918ef
v1.22.1: 85df7978b2e5bb78064ed0bcce14a39d105a1a3968bb92ee5d2f96a1fa09ed12
v1.22.0: 9fc14b993de2c275b54445255d7770bd1d6cdb49f4cf9c227c5b035f658a2351
amd64: amd64:
v1.25.3: 01b59ce429263c62b85d2db18f0ccdef076b866962ed63971ff2bd2864deea7b
v1.25.2: 63ee3de0c386c6f3c155874b46b07707cc72ce5b9e23f336befd0b829c1bd2ad
v1.25.1: adaa1e65c1cf9267a01e889d4c13884f883cf27948f00abb823f10486f1a8420
v1.25.0: 10b30b87af2cdc865983d742891eba467d038f94f3926bf5d0174f1abf6628f8
v1.24.7: 8b67319d28bf37e8e7c224954dc778cbe946f2bb0ed86975d8caa83d51c955ee
v1.24.6: 7f4443fd42e0e03f6fd0c7218ca7e2634c9255d5f9d7c581fe362e19098aec4c
v1.24.5: 3b9c1844ec0fc3c94015d63470b073a7b219082b6a6424c6b0da9cf97e234aeb
v1.24.4: 9ec08e0905c0a29a68676ba9f6dd7de73bef13cfa2b846a45e1c2189572dc57c v1.24.4: 9ec08e0905c0a29a68676ba9f6dd7de73bef13cfa2b846a45e1c2189572dc57c
v1.24.3: 406d5a80712c45d21cdbcc51aab298f0a43170df9477259443d48eac116998ff v1.24.3: 406d5a80712c45d21cdbcc51aab298f0a43170df9477259443d48eac116998ff
v1.24.2: 028f73b8e7c2ae389817d34e0cb829a814ce2fac0a535a3aa0708f3133e3e712 v1.24.2: 028f73b8e7c2ae389817d34e0cb829a814ce2fac0a535a3aa0708f3133e3e712
v1.24.1: 15e3193eecbc69330ada3f340c5a47999959bc227c735fa95e4aa79470c085d0 v1.24.1: 15e3193eecbc69330ada3f340c5a47999959bc227c735fa95e4aa79470c085d0
v1.24.0: 5e58a29eaaf69ea80e90d9780d2a2d5f189fd74f94ec3bec9e3823d472277318 v1.24.0: 5e58a29eaaf69ea80e90d9780d2a2d5f189fd74f94ec3bec9e3823d472277318
v1.23.13: ff86af2b5fa979234dd3f9e7b04ec7d3017239a58417397153726d8077c4ac89
v1.23.12: bf45d00062688d21ff479bf126e1259d0ce3dee1c5c2fcd803f57497cd5e9e83
v1.23.11: 2f10bd298a694d3133ea19192b796a106c282441e4148c114c39376042097692
v1.23.10: 43d186c3c58e3f8858c6a22bc71b5441282ac0ccbff6f1d0c2a66ee045986b64 v1.23.10: 43d186c3c58e3f8858c6a22bc71b5441282ac0ccbff6f1d0c2a66ee045986b64
v1.23.9: 947571c50ab840796fdd4ffb129154c005dfcb0fe83c6eff392d46cf187fd296 v1.23.9: 947571c50ab840796fdd4ffb129154c005dfcb0fe83c6eff392d46cf187fd296
v1.23.8: edbd60fd6a7e11c71f848b3a6e5d1b5a2bb8ebd703e5490caa8db267361a7b89 v1.23.8: edbd60fd6a7e11c71f848b3a6e5d1b5a2bb8ebd703e5490caa8db267361a7b89
@ -512,26 +512,22 @@ kubeadm_checksums:
v1.23.2: 58487391ec37489bb32fe532e367995e9ecaeafdb65c2113ff3675e7a8407219 v1.23.2: 58487391ec37489bb32fe532e367995e9ecaeafdb65c2113ff3675e7a8407219
v1.23.1: 4d5766cb90050ee84e15df5e09148072da2829492fdb324521c4fa6d74d3aa34 v1.23.1: 4d5766cb90050ee84e15df5e09148072da2829492fdb324521c4fa6d74d3aa34
v1.23.0: e21269a058d4ad421cf5818d4c7825991b8ba51cd06286932a33b21293b071b0 v1.23.0: e21269a058d4ad421cf5818d4c7825991b8ba51cd06286932a33b21293b071b0
v1.22.13: acbb0dd67b7656d0c70049484ba31c1981b803be0ae8f430dacad67e3e06c121
v1.22.12: 9410dcff069993caa7dfe783d35ac2d929ec258a2c3a4f0c3f269f1091931263
v1.22.11: da3594b4e905627fd5c158531280e40a71dadf44f1f0b6c061a1b729a898dd9b
v1.22.10: df5e090a3c0e24b92b26f22f1d7689b6ea860099ea89b97edf5d4c19fa6da0ca
v1.22.9: e3061f3a9c52bff82ae740c928fe389a256964a5756d691758bf3611904d7183
v1.22.8: fc10b4e5b66c9bfa6dc297bbb4a93f58051a6069c969905ef23c19680d8d49dc
v1.22.7: 7e4be37fc5ddeeae732886bf83c374198813e76d84ed2f6590145e08ece1a8b2
v1.22.6: 0bf8e47ad91215cd8c5e0ded565645aeb1ad6f0a9223a2486eb913bff929d472
v1.22.5: a512be0fa429f43d3457472efd73529cd2ba2cd54ef714faf6b69486beea054f
v1.22.4: 33b799df2941f12a53ffe995d86a385c35d3c543f9d2c00c0cdb47ec91a98c5c
v1.22.3: 3964e6fd46052eb4a9672421d8e8ce133b83b45abb77481b688dc6375390e480
v1.22.2: 4ff09d3cd2118ee2670bc96ed034620a9a1ea6a69ef38804363d4710a2f90d8c
v1.22.1: 50a5f0d186d7aefae309539e9cc7d530ef1a9b45ce690801655c2bee722d978c
v1.22.0: 90a48b92a57ff6aef63ff409e2feda0713ca926b2cd243fe7e88a84c483456cc
ppc64le: ppc64le:
v1.25.3: 8fe9a69db91c779a8f29b216134508ba49f999fa1e36b295b99444f31266da17
v1.25.2: a53101ed297299bcf1c4f44ec67ff1cb489ab2d75526d8be10c3068f161601a7
v1.25.1: c7e2c8d2b852e1b30894b64875191ce388a3a416d41311b21f2d8594872fe944
v1.25.0: 31bc72e892f3a6eb5db78003d6b6200ba56da46a746455991cb422877afc153d
v1.24.7: 29a53be9a74dcb01ea68b0a385bdd9b510f9792955f9f7c93ed608c851b5dc32
v1.24.6: 9d73bfde24ee9781fcca712658f297a041408b534f875f5e093222ed64c91c15
v1.24.5: f416c45ca5826ea3ff13be393911424a0fba3aa30b5557d3d32541551566142a
v1.24.4: 00fe93a291ddca28188056e597fc812b798706ea19b2da6f8aaf688f6ea95c0e v1.24.4: 00fe93a291ddca28188056e597fc812b798706ea19b2da6f8aaf688f6ea95c0e
v1.24.3: 1cb40441d8982362c6d4ffdd9a980a4563dcc5cccc1bb1d7370f0bd7340484d2 v1.24.3: 1cb40441d8982362c6d4ffdd9a980a4563dcc5cccc1bb1d7370f0bd7340484d2
v1.24.2: 452922d2ec9bfa5e085a879174d1d99adb6212598f3c8ffe15b5e7c3a4e128bb v1.24.2: 452922d2ec9bfa5e085a879174d1d99adb6212598f3c8ffe15b5e7c3a4e128bb
v1.24.1: 74e84b4e6f2c328a169dab33956bc076a2c1670c638764b9163b1080dcb68137 v1.24.1: 74e84b4e6f2c328a169dab33956bc076a2c1670c638764b9163b1080dcb68137
v1.24.0: 286de74330365bf660d480297a7aba165a956f6fbb98acd11df2f672e21d7b5c v1.24.0: 286de74330365bf660d480297a7aba165a956f6fbb98acd11df2f672e21d7b5c
v1.23.13: 3dbf72fdfc108bf41cab151ac340b336ba17b14fa008b15d84ce223b30391914
v1.23.12: ccae0a4c81a60e50219954393432c5f4d4692847c866ca497a48a1118f417d0d
v1.23.11: 9930cfb4ae7663f145c1d08e06c49ab60e28a6613ac5c7b19d047f15c1e24c22
v1.23.10: c9f484bd8806f50ce051a28776ef92e3634a1cdc0a47c9483ee77c34cde845c1 v1.23.10: c9f484bd8806f50ce051a28776ef92e3634a1cdc0a47c9483ee77c34cde845c1
v1.23.9: 03643613aa6afc6251270adc7681029d4fc10e8a75d553a1d8e63cf5b5a2a8fe v1.23.9: 03643613aa6afc6251270adc7681029d4fc10e8a75d553a1d8e63cf5b5a2a8fe
v1.23.8: dcfb69f564b34942136cc4cc340b1c800e3e610292e517e68ab5e0157b9510af v1.23.8: dcfb69f564b34942136cc4cc340b1c800e3e610292e517e68ab5e0157b9510af
@ -543,20 +539,6 @@ kubeadm_checksums:
v1.23.2: 2d76c4d9795e25867b9b6fe7853f94efb8c2f2b3052adab4073fddca93eedc01 v1.23.2: 2d76c4d9795e25867b9b6fe7853f94efb8c2f2b3052adab4073fddca93eedc01
v1.23.1: 6b645c868834197bcb25104f468c601477967341aba6326bdf5d0957dcaa9edc v1.23.1: 6b645c868834197bcb25104f468c601477967341aba6326bdf5d0957dcaa9edc
v1.23.0: 895c84055bca698f50ecdf1fc01d2f368563f77384b1dd00bdacbf6d0c825cc1 v1.23.0: 895c84055bca698f50ecdf1fc01d2f368563f77384b1dd00bdacbf6d0c825cc1
v1.22.13: 066051f2efb29656a04dbb6a378b813779fedacbf3be7034286b07ad43e364c7
v1.22.12: 70c14af98ecaa5d4ac234c827a560df9a020b346af250b6fb8ac9e50943486d3
v1.22.11: b2a8d92de208b66e3c2bd03521e26cf84a3977c74242e4f0e6724bdebd861326
v1.22.10: f74feaf8ea42145a668111733e8ed55a05d062ca40b0281851c2c48d28b74468
v1.22.9: aca9539afc208343b0138d2e9e56b018ea782b74068389e7381e1c361f584446
v1.22.8: 715dcac3dc5055306fc9b56352f5323df7947479c831993fecadc3a7c9072071
v1.22.7: 1496cb57091c6189728f295fbc6f8ea944f08fa9f844d917f7f7ca1a3b896acb
v1.22.6: a3aed2613b0566d1c829c15ff1206c25743bade24c4087b039824860d07de517
v1.22.5: d877c380f3fe4ee3c68f02ffa185252129aaba390129fd6a3542f6d9c5e88a6f
v1.22.4: 3e4cc45da8067f0da56e848b39531874c0a144840f4794731a4fb3b4689a8de2
v1.22.3: f993698da6f64a222edb92e352331c46516dbef9e235b12471c9d697aac74524
v1.22.2: 115bdf1e9e4821cf02aa77875930b4640cfba6b3560492ac75fe6159e897be6f
v1.22.1: 45e5145abf4700ddb5de3469ddb6b316e7588595e4a3e64f44064738808b9c97
v1.22.0: 22a7d995e78e93abca2999c911b065d63f51f33982dc305f23762a8d7c045d25
etcd_binary_checksums: etcd_binary_checksums:
# Etcd does not have arm32 builds at the moment, having some dummy value is # Etcd does not have arm32 builds at the moment, having some dummy value is
@ -564,15 +546,19 @@ etcd_binary_checksums:
arm: arm:
v3.5.3: 0 v3.5.3: 0
v3.5.4: 0 v3.5.4: 0
v3.5.5: 0
arm64: arm64:
v3.5.3: 8b00f2f51568303799368ee4a3c9b9ff8a3dd9f8b7772c4f6589e46bc62f7115 v3.5.3: 8b00f2f51568303799368ee4a3c9b9ff8a3dd9f8b7772c4f6589e46bc62f7115
v3.5.4: 8e9c2c28ed6b35f36fd94300541da10e1385f335d677afd8efccdcba026f1fa7 v3.5.4: 8e9c2c28ed6b35f36fd94300541da10e1385f335d677afd8efccdcba026f1fa7
v3.5.5: a8d177ae8ecfd1ef025c35ac8c444041d14e67028c1a7b4eda3a69a8dee5f9c3
amd64: amd64:
v3.5.3: e13e119ff9b28234561738cd261c2a031eb1c8688079dcf96d8035b3ad19ca58 v3.5.3: e13e119ff9b28234561738cd261c2a031eb1c8688079dcf96d8035b3ad19ca58
v3.5.4: b1091166153df1ee0bb29b47fb1943ef0ddf0cd5d07a8fe69827580a08134def v3.5.4: b1091166153df1ee0bb29b47fb1943ef0ddf0cd5d07a8fe69827580a08134def
v3.5.5: 7910a2fdb1863c80b885d06f6729043bff0540f2006bf6af34674df2636cb906
ppc64le: ppc64le:
v3.5.3: f14154897ca5ad4698383b4c197001340fbe467525f6fab3b89ee8116246480f v3.5.3: f14154897ca5ad4698383b4c197001340fbe467525f6fab3b89ee8116246480f
v3.5.4: 2f0389caed87c2504ffc5a07592ca2a688dee45d599073e5f977d9ce75b5f941 v3.5.4: 2f0389caed87c2504ffc5a07592ca2a688dee45d599073e5f977d9ce75b5f941
v3.5.5: 08422dffd5749f0a5f18bd820241d751e539a666af94251c3715cba8f4702c42
cni_binary_checksums: cni_binary_checksums:
arm: arm:
@ -645,13 +631,13 @@ krew_archive_checksums:
helm_archive_checksums: helm_archive_checksums:
arm: arm:
v3.9.2: fb9f0c1c9475c66c2b3579b908c181d519761bbfae963ffac860bc683a2253de v3.9.4: 18ce0f79dcd927fea5b714ca03299929dad05266192d4cde3de6b4c4d4544249
arm64: arm64:
v3.9.2: e4e2f9aad786042d903534e3131bc5300d245c24bbadf64fc46cca1728051dbc v3.9.4: d24163e466f7884c55079d1050968e80a05b633830047116cdfd8ae28d35b0c0
amd64: amd64:
v3.9.2: 3f5be38068a1829670440ccf00b3b6656fd90d0d9cfd4367539f3b13e4c20531 v3.9.4: 31960ff2f76a7379d9bac526ddf889fb79241191f1dbe2a24f7864ddcb3f6560
ppc64le: ppc64le:
v3.9.2: 85ae9bc357095917cdb2d801b7eb62926f3fed6c2dcf07e1280809ad2af3daa9 v3.9.4: c63a951415c192397fda07c2f52aa60639b280920381c48d58be6803eb0c22f9
cri_dockerd_archive_checksums: cri_dockerd_archive_checksums:
arm: arm:
@ -855,6 +841,16 @@ containerd_archive_checksums:
1.6.7: 0db5cb6d5dd4f3b7369c6945d2ec29a9c10b106643948e3224e53885f56863a9 1.6.7: 0db5cb6d5dd4f3b7369c6945d2ec29a9c10b106643948e3224e53885f56863a9
1.6.8: f18769721f614828f6b778030c72dc6969ce2108f2363ddc85f6c7a147df0fb8 1.6.8: f18769721f614828f6b778030c72dc6969ce2108f2363ddc85f6c7a147df0fb8
skopeo_binary_checksums:
arm:
v1.10.0: 0
arm64:
v1.10.0: 3bfc344d4940df29358f8056de7b8dd488b88a5d777b3106748ba66851fa2c58
amd64:
v1.10.0: 20fbd1bac1d33768c3671e4fe9d90c5233d7e13a40e4935b4b24ebc083390604
ppc64le:
v1.10.0: 0
etcd_binary_checksum: "{{ etcd_binary_checksums[image_arch][etcd_version] }}" etcd_binary_checksum: "{{ etcd_binary_checksums[image_arch][etcd_version] }}"
cni_binary_checksum: "{{ cni_binary_checksums[image_arch][cni_version] }}" cni_binary_checksum: "{{ cni_binary_checksums[image_arch][cni_version] }}"
kubelet_binary_checksum: "{{ kubelet_checksums[image_arch][kube_version] }}" kubelet_binary_checksum: "{{ kubelet_checksums[image_arch][kube_version] }}"
@ -863,6 +859,7 @@ kubeadm_binary_checksum: "{{ kubeadm_checksums[image_arch][kubeadm_version] }}"
calicoctl_binary_checksum: "{{ calicoctl_binary_checksums[image_arch][calico_ctl_version] }}" calicoctl_binary_checksum: "{{ calicoctl_binary_checksums[image_arch][calico_ctl_version] }}"
calico_crds_archive_checksum: "{{ calico_crds_archive_checksums[calico_version] }}" calico_crds_archive_checksum: "{{ calico_crds_archive_checksums[calico_version] }}"
crictl_binary_checksum: "{{ crictl_checksums[image_arch][crictl_version] }}" crictl_binary_checksum: "{{ crictl_checksums[image_arch][crictl_version] }}"
crio_archive_checksum: "{{ crio_archive_checksums[image_arch][crio_version] }}"
cri_dockerd_archive_checksum: "{{ cri_dockerd_archive_checksums[image_arch][cri_dockerd_version] }}" cri_dockerd_archive_checksum: "{{ cri_dockerd_archive_checksums[image_arch][cri_dockerd_version] }}"
helm_archive_checksum: "{{ helm_archive_checksums[image_arch][helm_version] }}" helm_archive_checksum: "{{ helm_archive_checksums[image_arch][helm_version] }}"
runc_binary_checksum: "{{ runc_checksums[image_arch][runc_version] }}" runc_binary_checksum: "{{ runc_checksums[image_arch][runc_version] }}"
@ -874,6 +871,7 @@ gvisor_containerd_shim_binary_checksum: "{{ gvisor_containerd_shim_binary_checks
nerdctl_archive_checksum: "{{ nerdctl_archive_checksums[image_arch][nerdctl_version] }}" nerdctl_archive_checksum: "{{ nerdctl_archive_checksums[image_arch][nerdctl_version] }}"
krew_archive_checksum: "{{ krew_archive_checksums[host_os][image_arch][krew_version] }}" krew_archive_checksum: "{{ krew_archive_checksums[host_os][image_arch][krew_version] }}"
containerd_archive_checksum: "{{ containerd_archive_checksums[image_arch][containerd_version] }}" containerd_archive_checksum: "{{ containerd_archive_checksums[image_arch][containerd_version] }}"
skopeo_binary_checksum: "{{ skopeo_binary_checksums[image_arch][skopeo_version] }}"
# Containers # Containers
# In some cases, we need a way to set --registry-mirror or --insecure-registry for docker, # In some cases, we need a way to set --registry-mirror or --insecure-registry for docker,
@ -949,7 +947,7 @@ haproxy_image_tag: 2.6.1-alpine
# Coredns version should be supported by corefile-migration (or at least work with) # Coredns version should be supported by corefile-migration (or at least work with)
# bundle with kubeadm; if not 'basic' upgrade can sometimes fail # bundle with kubeadm; if not 'basic' upgrade can sometimes fail
coredns_version: "{{ 'v1.8.6' if (kube_version is version('v1.23.0','>=')) else 'v1.8.0' }}" coredns_version: "v1.8.6"
coredns_image_is_namespaced: "{{ (coredns_version is version('v1.7.1','>=')) }}" coredns_image_is_namespaced: "{{ (coredns_version is version('v1.7.1','>=')) }}"
coredns_image_repo: "{{ kube_image_repo }}{{'/coredns/coredns' if (coredns_image_is_namespaced | bool) else '/coredns' }}" coredns_image_repo: "{{ kube_image_repo }}{{'/coredns/coredns' if (coredns_image_is_namespaced | bool) else '/coredns' }}"
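Note: coredns_version (pinned above to v1.8.6) is an ordinary Ansible default, so it can still be overridden per cluster in group_vars as long as the chosen release stays compatible with the corefile-migration tooling mentioned in the comment. A minimal sketch of such an override, assuming the usual inventory/sample group_vars layout (the file path is illustrative, not part of this change):

  # inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml (hypothetical)
  coredns_version: "v1.8.6"   # the coredns image repo/tag variables above derive from this value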
@ -982,12 +980,12 @@ local_path_provisioner_version: "v0.0.22"
local_path_provisioner_image_repo: "{{ docker_image_repo }}/rancher/local-path-provisioner" local_path_provisioner_image_repo: "{{ docker_image_repo }}/rancher/local-path-provisioner"
local_path_provisioner_image_tag: "{{ local_path_provisioner_version }}" local_path_provisioner_image_tag: "{{ local_path_provisioner_version }}"
ingress_nginx_controller_image_repo: "{{ kube_image_repo }}/ingress-nginx/controller" ingress_nginx_controller_image_repo: "{{ kube_image_repo }}/ingress-nginx/controller"
ingress_nginx_controller_image_tag: "v1.3.0" ingress_nginx_controller_image_tag: "v1.4.0"
ingress_nginx_kube_webhook_certgen_imae_repo: "{{ kube_image_repo }}/ingress-nginx/kube-webhook-certgen" ingress_nginx_kube_webhook_certgen_imae_repo: "{{ kube_image_repo }}/ingress-nginx/kube-webhook-certgen"
ingress_nginx_kube_webhook_certgen_imae_tag: "v1.1.1" ingress_nginx_kube_webhook_certgen_imae_tag: "v1.3.0"
alb_ingress_image_repo: "{{ docker_image_repo }}/amazon/aws-alb-ingress-controller" alb_ingress_image_repo: "{{ docker_image_repo }}/amazon/aws-alb-ingress-controller"
alb_ingress_image_tag: "v1.1.9" alb_ingress_image_tag: "v1.1.9"
cert_manager_version: "v1.9.0" cert_manager_version: "v1.9.1"
cert_manager_controller_image_repo: "{{ quay_image_repo }}/jetstack/cert-manager-controller" cert_manager_controller_image_repo: "{{ quay_image_repo }}/jetstack/cert-manager-controller"
cert_manager_controller_image_tag: "{{ cert_manager_version }}" cert_manager_controller_image_tag: "{{ cert_manager_version }}"
cert_manager_cainjector_image_repo: "{{ quay_image_repo }}/jetstack/cert-manager-cainjector" cert_manager_cainjector_image_repo: "{{ quay_image_repo }}/jetstack/cert-manager-cainjector"
@ -1009,9 +1007,9 @@ csi_livenessprobe_image_repo: "{{ kube_image_repo }}/sig-storage/livenessprobe"
csi_livenessprobe_image_tag: "v2.5.0" csi_livenessprobe_image_tag: "v2.5.0"
snapshot_controller_supported_versions: snapshot_controller_supported_versions:
v1.25: "v4.2.1"
v1.24: "v4.2.1" v1.24: "v4.2.1"
v1.23: "v4.2.1" v1.23: "v4.2.1"
v1.22: "v4.2.1"
snapshot_controller_image_repo: "{{ kube_image_repo }}/sig-storage/snapshot-controller" snapshot_controller_image_repo: "{{ kube_image_repo }}/sig-storage/snapshot-controller"
snapshot_controller_image_tag: "{{ snapshot_controller_supported_versions[kube_major_version] }}" snapshot_controller_image_tag: "{{ snapshot_controller_supported_versions[kube_major_version] }}"
@ -1156,6 +1154,19 @@ downloads:
groups: groups:
- k8s_cluster - k8s_cluster
crio:
file: true
enabled: "{{ container_manager == 'crio' }}"
version: "{{ crio_version }}"
dest: "{{ local_release_dir }}/cri-o.{{ image_arch }}.{{ crio_version }}tar.gz"
sha256: "{{ crio_archive_checksum }}"
url: "{{ crio_download_url }}"
unarchive: true
owner: "root"
mode: "0755"
groups:
- k8s_cluster
cri_dockerd: cri_dockerd:
file: true file: true
enabled: "{{ container_manager == 'docker' }}" enabled: "{{ container_manager == 'docker' }}"
@ -1275,6 +1286,19 @@ downloads:
groups: groups:
- k8s_cluster - k8s_cluster
skopeo:
file: true
enabled: "{{ container_manager == 'crio' }}"
version: "{{ skopeo_version }}"
dest: "{{ local_release_dir }}/skopeo"
sha256: "{{ skopeo_binary_checksum }}"
url: "{{ skopeo_download_url }}"
unarchive: false
owner: "root"
mode: "0755"
groups:
- kube_control_plane
cilium: cilium:
enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}" enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}"
container: true container: true
@ -1302,6 +1326,51 @@ downloads:
groups: groups:
- k8s_cluster - k8s_cluster
cilium_hubble_relay:
enabled: "{{ cilium_enable_hubble }}"
container: true
repo: "{{ cilium_hubble_relay_image_repo }}"
tag: "{{ cilium_hubble_relay_image_tag }}"
sha256: "{{ cilium_hubble_relay_digest_checksum|default(None) }}"
groups:
- k8s_cluster
cilium_hubble_certgen:
enabled: "{{ cilium_enable_hubble }}"
container: true
repo: "{{ cilium_hubble_certgen_image_repo }}"
tag: "{{ cilium_hubble_certgen_image_tag }}"
sha256: "{{ cilium_hubble_certgen_digest_checksum|default(None) }}"
groups:
- k8s_cluster
cilium_hubble_ui:
enabled: "{{ cilium_enable_hubble }}"
container: true
repo: "{{ cilium_hubble_ui_image_repo }}"
tag: "{{ cilium_hubble_ui_image_tag }}"
sha256: "{{ cilium_hubble_ui_digest_checksum|default(None) }}"
groups:
- k8s_cluster
cilium_hubble_ui_backend:
enabled: "{{ cilium_enable_hubble }}"
container: true
repo: "{{ cilium_hubble_ui_backend_image_repo }}"
tag: "{{ cilium_hubble_ui_backend_image_tag }}"
sha256: "{{ cilium_hubble_ui_backend_digest_checksum|default(None) }}"
groups:
- k8s_cluster
cilium_hubble_envoy:
enabled: "{{ cilium_enable_hubble }}"
container: true
repo: "{{ cilium_hubble_envoy_image_repo }}"
tag: "{{ cilium_hubble_envoy_image_tag }}"
sha256: "{{ cilium_hubble_envoy_digest_checksum|default(None) }}"
groups:
- k8s_cluster
multus: multus:
enabled: "{{ kube_network_plugin_multus }}" enabled: "{{ kube_network_plugin_multus }}"
container: true container: true
@ -1495,7 +1564,7 @@ downloads:
tag: "{{ coredns_image_tag }}" tag: "{{ coredns_image_tag }}"
sha256: "{{ coredns_digest_checksum|default(None) }}" sha256: "{{ coredns_digest_checksum|default(None) }}"
groups: groups:
- kube_control_plane - k8s_cluster
nodelocaldns: nodelocaldns:
enabled: "{{ enable_nodelocaldns }}" enabled: "{{ enable_nodelocaldns }}"
@ -1731,7 +1800,7 @@ downloads:
- kube_control_plane - kube_control_plane
metallb_speaker: metallb_speaker:
enabled: "{{ metallb_enabled }}" enabled: "{{ metallb_speaker_enabled }}"
container: true container: true
repo: "{{ metallb_speaker_image_repo }}" repo: "{{ metallb_speaker_image_repo }}"
tag: "{{ metallb_version }}" tag: "{{ metallb_version }}"

View file

@ -35,8 +35,8 @@
- name: Set image save/load command for crio - name: Set image save/load command for crio
set_fact: set_fact:
image_save_command: "skopeo copy containers-storage:{{ image_reponame }} docker-archive:{{ image_path_final }}" image_save_command: "{{ bin_dir }}/skopeo copy containers-storage:{{ image_reponame }} docker-archive:{{ image_path_final }} 2>/dev/null"
image_load_command: "skopeo copy docker-archive:{{ image_path_final }} containers-storage:{{ image_reponame }}" image_load_command: "{{ bin_dir }}/skopeo copy docker-archive:{{ image_path_final }} containers-storage:{{ image_reponame }} 2>/dev/null"
when: container_manager == 'crio' when: container_manager == 'crio'
- name: Set image save/load command for docker on localhost - name: Set image save/load command for docker on localhost
@ -51,5 +51,5 @@
- name: Set image save/load command for crio on localhost - name: Set image save/load command for crio on localhost
set_fact: set_fact:
image_save_command_on_localhost: "skopeo copy containers-storage:{{ image_reponame }} docker-archive:{{ image_path_final }}" image_save_command_on_localhost: "{{ bin_dir }}/skopeo copy containers-storage:{{ image_reponame }} docker-archive:{{ image_path_final }} 2>/dev/null"
when: container_manager_on_localhost == 'crio' when: container_manager_on_localhost == 'crio'
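For reference, a minimal sketch of what the save command above resolves to once the skopeo binary has been downloaded to {{ bin_dir }}; the image name and archive path here are placeholders, not values taken from this change:

  # hypothetical one-off task; adjust the image reference and destination as needed
  - name: Save a CRI-O image to a docker-archive tarball with skopeo
    command: >-
      {{ bin_dir }}/skopeo copy
      containers-storage:registry.k8s.io/pause:3.8
      docker-archive:/tmp/pause.tar
    become: true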

View file

@ -66,7 +66,7 @@ etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif %
etcd_blkio_weight: 1000 etcd_blkio_weight: 1000
etcd_node_cert_hosts: "{{ groups['k8s_cluster'] | union(groups.get('calico_rr', [])) }}" etcd_node_cert_hosts: "{{ groups['k8s_cluster'] }}"
etcd_compaction_retention: "8" etcd_compaction_retention: "8"

View file

@ -33,14 +33,13 @@
stat: stat:
path: "{{ etcd_cert_dir }}/{{ item }}" path: "{{ etcd_cert_dir }}/{{ item }}"
register: etcd_node_certs register: etcd_node_certs
when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or when: inventory_hostname in groups['k8s_cluster']
inventory_hostname in groups['k8s_cluster'])
with_items: with_items:
- ca.pem - ca.pem
- node-{{ inventory_hostname }}.pem - node-{{ inventory_hostname }}.pem
- node-{{ inventory_hostname }}-key.pem - node-{{ inventory_hostname }}-key.pem
- name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node" - name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(1/2)"
set_fact: set_fact:
gen_certs: true gen_certs: true
when: force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list when: force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list
@ -56,13 +55,39 @@
'{{ etcd_cert_dir }}/member-{{ host }}.pem', '{{ etcd_cert_dir }}/member-{{ host }}.pem',
'{{ etcd_cert_dir }}/member-{{ host }}-key.pem', '{{ etcd_cert_dir }}/member-{{ host }}-key.pem',
{% endfor %} {% endfor %}
{% set k8s_nodes = groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort %} {% set k8s_nodes = groups['kube_control_plane'] %}
{% for host in k8s_nodes %} {% for host in k8s_nodes %}
'{{ etcd_cert_dir }}/node-{{ host }}.pem', '{{ etcd_cert_dir }}/node-{{ host }}.pem',
'{{ etcd_cert_dir }}/node-{{ host }}-key.pem' '{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
{% if not loop.last %}{{','}}{% endif %} {% if not loop.last %}{{','}}{% endif %}
{% endfor %}] {% endfor %}]
- name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(2/2)"
set_fact:
gen_certs: true
run_once: true
with_items: "{{ expected_files }}"
vars:
expected_files: >-
['{{ etcd_cert_dir }}/ca.pem',
{% set etcd_members = groups['etcd'] %}
{% for host in etcd_members %}
'{{ etcd_cert_dir }}/admin-{{ host }}.pem',
'{{ etcd_cert_dir }}/admin-{{ host }}-key.pem',
'{{ etcd_cert_dir }}/member-{{ host }}.pem',
'{{ etcd_cert_dir }}/member-{{ host }}-key.pem',
{% endfor %}
{% set k8s_nodes = groups['k8s_cluster']|unique|sort %}
{% for host in k8s_nodes %}
'{{ etcd_cert_dir }}/node-{{ host }}.pem',
'{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
{% if not loop.last %}{{','}}{% endif %}
{% endfor %}]
when:
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list
- name: "Check_certs | Set 'gen_master_certs' object to track whether member and admin certs exist on first etcd node" - name: "Check_certs | Set 'gen_master_certs' object to track whether member and admin certs exist on first etcd node"
set_fact: set_fact:
gen_master_certs: |- gen_master_certs: |-
@ -89,7 +114,7 @@
set_fact: set_fact:
gen_node_certs: |- gen_node_certs: |-
{ {
{% set k8s_nodes = groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort -%} {% set k8s_nodes = groups['k8s_cluster'] -%}
{% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %} {% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %}
{% for host in k8s_nodes -%} {% for host in k8s_nodes -%}
{% set host_cert = "%s/node-%s.pem"|format(etcd_cert_dir, host) %} {% set host_cert = "%s/node-%s.pem"|format(etcd_cert_dir, host) %}
@ -125,8 +150,7 @@
set_fact: set_fact:
kubernetes_host_requires_sync: true kubernetes_host_requires_sync: true
when: when:
- (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or - inventory_hostname in groups['k8s_cluster'] and
inventory_hostname in groups['k8s_cluster']) and
inventory_hostname not in groups['etcd'] inventory_hostname not in groups['etcd']
- (not etcd_node_certs.results[0].stat.exists|default(false)) or - (not etcd_node_certs.results[0].stat.exists|default(false)) or
(not etcd_node_certs.results[1].stat.exists|default(false)) or (not etcd_node_certs.results[1].stat.exists|default(false)) or

View file

@ -38,7 +38,7 @@
- gen_certs|default(false) - gen_certs|default(false)
- inventory_hostname == groups['etcd'][0] - inventory_hostname == groups['etcd'][0]
- name: Gen_certs | run cert generation script - name: Gen_certs | run cert generation script for etcd and kube control plane nodes
command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}" command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
environment: environment:
- MASTERS: "{% for m in groups['etcd'] %} - MASTERS: "{% for m in groups['etcd'] %}
@ -46,7 +46,7 @@
{{ m }} {{ m }}
{% endif %} {% endif %}
{% endfor %}" {% endfor %}"
- HOSTS: "{% for h in (groups['k8s_cluster'] + groups['calico_rr']|default([]))|unique %} - HOSTS: "{% for h in groups['kube_control_plane'] %}
{% if gen_node_certs[h] %} {% if gen_node_certs[h] %}
{{ h }} {{ h }}
{% endif %} {% endif %}
@ -56,7 +56,23 @@
when: gen_certs|default(false) when: gen_certs|default(false)
notify: set etcd_secret_changed notify: set etcd_secret_changed
- name: Gen_certs | Gather etcd member and admin certs from first etcd node - name: Gen_certs | run cert generation script for all clients
command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
environment:
- HOSTS: "{% for h in groups['k8s_cluster'] %}
{% if gen_node_certs[h] %}
{{ h }}
{% endif %}
{% endfor %}"
run_once: yes
delegate_to: "{{ groups['etcd'][0] }}"
when:
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- gen_certs|default(false)
notify: set etcd_secret_changed
- name: Gen_certs | Gather etcd member/admin and kube_control_plane client certs from first etcd node
slurp: slurp:
src: "{{ item }}" src: "{{ item }}"
register: etcd_master_certs register: etcd_master_certs
@ -69,6 +85,10 @@
'{{ etcd_cert_dir }}/member-{{ node }}.pem', '{{ etcd_cert_dir }}/member-{{ node }}.pem',
'{{ etcd_cert_dir }}/member-{{ node }}-key.pem', '{{ etcd_cert_dir }}/member-{{ node }}-key.pem',
{% endfor %}]" {% endfor %}]"
- "[{% for node in (groups['kube_control_plane']) %}
'{{ etcd_cert_dir }}/node-{{ node }}.pem',
'{{ etcd_cert_dir }}/node-{{ node }}-key.pem',
{% endfor %}]"
delegate_to: "{{ groups['etcd'][0] }}" delegate_to: "{{ groups['etcd'][0] }}"
when: when:
- inventory_hostname in groups['etcd'] - inventory_hostname in groups['etcd']
@ -76,7 +96,7 @@
- inventory_hostname != groups['etcd'][0] - inventory_hostname != groups['etcd'][0]
notify: set etcd_secret_changed notify: set etcd_secret_changed
- name: Gen_certs | Write etcd member and admin certs to other etcd nodes - name: Gen_certs | Write etcd member/admin and kube_control_plane client certs to other etcd nodes
copy: copy:
dest: "{{ item.item }}" dest: "{{ item.item }}"
content: "{{ item.content | b64decode }}" content: "{{ item.content | b64decode }}"
@ -96,7 +116,7 @@
src: "{{ item }}" src: "{{ item }}"
register: etcd_master_node_certs register: etcd_master_node_certs
with_items: with_items:
- "[{% for node in (groups['k8s_cluster'] + groups['calico_rr']|default([]))|unique %} - "[{% for node in groups['k8s_cluster'] %}
'{{ etcd_cert_dir }}/node-{{ node }}.pem', '{{ etcd_cert_dir }}/node-{{ node }}.pem',
'{{ etcd_cert_dir }}/node-{{ node }}-key.pem', '{{ etcd_cert_dir }}/node-{{ node }}-key.pem',
{% endfor %}]" {% endfor %}]"
@ -104,6 +124,8 @@
when: when:
- inventory_hostname in groups['etcd'] - inventory_hostname in groups['etcd']
- inventory_hostname != groups['etcd'][0] - inventory_hostname != groups['etcd'][0]
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
notify: set etcd_secret_changed notify: set etcd_secret_changed
- name: Gen_certs | Write node certs to other etcd nodes - name: Gen_certs | Write node certs to other etcd nodes
@ -117,47 +139,21 @@
when: when:
- inventory_hostname in groups['etcd'] - inventory_hostname in groups['etcd']
- inventory_hostname != groups['etcd'][0] - inventory_hostname != groups['etcd'][0]
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
loop_control: loop_control:
label: "{{ item.item }}" label: "{{ item.item }}"
- name: Gen_certs | Set cert names per node - include_tasks: gen_nodes_certs_script.yml
set_fact: when:
my_etcd_node_certs: [ 'ca.pem', - inventory_hostname in groups['kube_control_plane'] and
'node-{{ inventory_hostname }}.pem',
'node-{{ inventory_hostname }}-key.pem']
tags:
- facts
- name: "Check_certs | Set 'sync_certs' to true on nodes"
set_fact:
sync_certs: true
when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
inventory_hostname in groups['k8s_cluster']) and
inventory_hostname not in groups['etcd']
with_items:
- "{{ my_etcd_node_certs }}"
- name: Gen_certs | Gather node certs
shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs|join(' ') }} | base64 --wrap=0"
args:
executable: /bin/bash
warn: false
no_log: "{{ not (unsafe_show_logs|bool) }}"
register: etcd_node_certs
check_mode: no
delegate_to: "{{ groups['etcd'][0] }}"
when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
inventory_hostname in groups['k8s_cluster']) and
sync_certs|default(false) and inventory_hostname not in groups['etcd'] sync_certs|default(false) and inventory_hostname not in groups['etcd']
- name: Gen_certs | Copy certs on nodes - include_tasks: gen_nodes_certs_script.yml
shell: "set -o pipefail && base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}" when:
args: - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
executable: /bin/bash - kube_network_plugin != "calico" or calico_datastore == "etcd"
no_log: "{{ not (unsafe_show_logs|bool) }}" - inventory_hostname in groups['k8s_cluster'] and
changed_when: false
when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
inventory_hostname in groups['k8s_cluster']) and
sync_certs|default(false) and inventory_hostname not in groups['etcd'] sync_certs|default(false) and inventory_hostname not in groups['etcd']
- name: Gen_certs | check certificate permissions - name: Gen_certs | check certificate permissions

View file

@ -0,0 +1,32 @@
---
- name: Gen_certs | Set cert names per node
set_fact:
my_etcd_node_certs: [ 'ca.pem',
'node-{{ inventory_hostname }}.pem',
'node-{{ inventory_hostname }}-key.pem']
tags:
- facts
- name: "Check_certs | Set 'sync_certs' to true on nodes"
set_fact:
sync_certs: true
with_items:
- "{{ my_etcd_node_certs }}"
- name: Gen_certs | Gather node certs
shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs|join(' ') }} | base64 --wrap=0"
args:
executable: /bin/bash
warn: false
no_log: "{{ not (unsafe_show_logs|bool) }}"
register: etcd_node_certs
check_mode: no
delegate_to: "{{ groups['etcd'][0] }}"
changed_when: false
- name: Gen_certs | Copy certs on nodes
shell: "set -o pipefail && base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}"
args:
executable: /bin/bash
no_log: "{{ not (unsafe_show_logs|bool) }}"
changed_when: false

View file

@ -12,6 +12,16 @@
- etcd-secrets - etcd-secrets
- include_tasks: upd_ca_trust.yml - include_tasks: upd_ca_trust.yml
when:
- inventory_hostname in groups['etcd']|union(groups['kube_control_plane'])|unique|sort
tags:
- etcd-secrets
- include_tasks: upd_ca_trust.yml
when:
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- inventory_hostname in groups['k8s_cluster']
tags: tags:
- etcd-secrets - etcd-secrets
@ -21,7 +31,9 @@
changed_when: false changed_when: false
check_mode: no check_mode: no
when: when:
- inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- inventory_hostname in groups['k8s_cluster']
tags: tags:
- master - master
- network - network
@ -30,7 +42,9 @@
set_fact: set_fact:
etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}" etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}"
when: when:
- inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- inventory_hostname in groups['k8s_cluster']
tags: tags:
- master - master
- network - network

View file

@ -33,7 +33,7 @@
{{ primaryClusterIP }} {{ primaryClusterIP }}
{%- endif -%} {%- endif -%}
upstreamForwardTarget: >- upstreamForwardTarget: >-
{%- if resolvconf_mode == 'host_resolvconf' and upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%} {%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%}
{{ upstream_dns_servers|join(' ') }} {{ upstream_dns_servers|join(' ') }}
{%- else -%} {%- else -%}
/etc/resolv.conf /etc/resolv.conf
@ -61,7 +61,7 @@
{{ primaryClusterIP }} {{ primaryClusterIP }}
{%- endif -%} {%- endif -%}
upstreamForwardTarget: >- upstreamForwardTarget: >-
{%- if resolvconf_mode == 'host_resolvconf' and upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%} {%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%}
{{ upstream_dns_servers|join(' ') }} {{ upstream_dns_servers|join(' ') }}
{%- else -%} {%- else -%}
/etc/resolv.conf /etc/resolv.conf

View file

@ -13,6 +13,11 @@ data:
{{ block['zones'] | join(' ') }} { {{ block['zones'] | join(' ') }} {
log log
errors errors
{% if block['rewrite'] is defined and block['rewrite']|length > 0 %}
{% for rewrite_match in block['rewrite'] %}
rewrite {{ rewrite_match }}
{% endfor %}
{% endif %}
forward . {{ block['nameservers'] | join(' ') }} forward . {{ block['nameservers'] | join(' ') }}
loadbalance loadbalance
cache {{ block['cache'] | default(5) }} cache {{ block['cache'] | default(5) }}
@ -44,10 +49,12 @@ data:
{% if upstream_dns_servers is defined and upstream_dns_servers|length > 0 %} {% if upstream_dns_servers is defined and upstream_dns_servers|length > 0 %}
forward . {{ upstream_dns_servers|join(' ') }} { forward . {{ upstream_dns_servers|join(' ') }} {
prefer_udp prefer_udp
max_concurrent 1000
} }
{% else %} {% else %}
forward . /etc/resolv.conf { forward . /etc/resolv.conf {
prefer_udp prefer_udp
max_concurrent 1000
} }
{% endif %} {% endif %}
{% if enable_coredns_k8s_external %} {% if enable_coredns_k8s_external %}
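The rewrite lines rendered above are driven by an optional 'rewrite' list on each coredns_external_zones entry. A minimal sketch of such an entry in group_vars, with placeholder zone name, nameserver IP and rewrite rule:

  coredns_external_zones:
    - zones:
        - example.internal.
      nameservers:
        - 10.0.0.53
      cache: 5
      rewrite:
        - name substring example.internal example.corp.internal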

View file

@ -32,8 +32,14 @@ spec:
cpu: {{ netchecker_server_cpu_requests }} cpu: {{ netchecker_server_cpu_requests }}
memory: {{ netchecker_server_memory_requests }} memory: {{ netchecker_server_memory_requests }}
securityContext: securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ['ALL']
runAsUser: {{ netchecker_server_user | default('0') }} runAsUser: {{ netchecker_server_user | default('0') }}
runAsGroup: {{ netchecker_server_group | default('0') }} runAsGroup: {{ netchecker_server_group | default('0') }}
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
ports: ports:
- containerPort: 8081 - containerPort: 8081
args: args:
@ -63,8 +69,14 @@ spec:
cpu: {{ netchecker_etcd_cpu_requests }} cpu: {{ netchecker_etcd_cpu_requests }}
memory: {{ netchecker_etcd_memory_requests }} memory: {{ netchecker_etcd_memory_requests }}
securityContext: securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ['ALL']
runAsUser: {{ netchecker_server_user | default('0') }} runAsUser: {{ netchecker_server_user | default('0') }}
runAsGroup: {{ netchecker_server_group | default('0') }} runAsGroup: {{ netchecker_server_group | default('0') }}
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
tolerations: tolerations:
- effect: NoSchedule - effect: NoSchedule
operator: Exists operator: Exists

View file

@ -14,6 +14,11 @@ data:
errors errors
cache {{ block['cache'] | default(30) }} cache {{ block['cache'] | default(30) }}
reload reload
{% if block['rewrite'] is defined and block['rewrite']|length > 0 %}
{% for rewrite_match in block['rewrite'] %}
rewrite {{ rewrite_match }}
{% endfor %}
{% endif %}
loop loop
bind {{ nodelocaldns_ip }} bind {{ nodelocaldns_ip }}
forward . {{ block['nameservers'] | join(' ') }} forward . {{ block['nameservers'] | join(' ') }}

View file

@ -1,5 +1,5 @@
--- ---
argocd_enabled: false argocd_enabled: false
argocd_version: v2.4.7 argocd_version: v2.4.15
argocd_namespace: argocd argocd_namespace: argocd
# argocd_admin_password: # argocd_admin_password:

View file

@ -2,7 +2,7 @@
- name: Kubernetes Apps | Install yq - name: Kubernetes Apps | Install yq
become: yes become: yes
get_url: get_url:
url: "https://github.com/mikefarah/yq/releases/download/v4.25.3/yq_linux_amd64" url: "https://github.com/mikefarah/yq/releases/download/v4.27.5/yq_linux_{{ host_architecture }}"
dest: "{{ bin_dir }}/yq" dest: "{{ bin_dir }}/yq"
mode: '0755' mode: '0755'

View file

@ -80,9 +80,6 @@ spec:
- name: kubelet-dir - name: kubelet-dir
mountPath: /var/lib/kubelet mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional" mountPropagation: "Bidirectional"
- name: pods-cloud-data
mountPath: /var/lib/cloud/data
readOnly: true
- name: pods-probe-dir - name: pods-probe-dir
mountPath: /dev mountPath: /dev
mountPropagation: "HostToContainer" mountPropagation: "HostToContainer"
@ -110,10 +107,6 @@ spec:
hostPath: hostPath:
path: /var/lib/kubelet path: /var/lib/kubelet
type: Directory type: Directory
- name: pods-cloud-data
hostPath:
path: /var/lib/cloud/data
type: Directory
- name: pods-probe-dir - name: pods-probe-dir
hostPath: hostPath:
path: /dev path: /dev

View file

@ -1,4 +1,4 @@
apiVersion: policy/v1beta1 apiVersion: policy/v1
kind: PodDisruptionBudget kind: PodDisruptionBudget
metadata: metadata:
name: cinder-csi-pdb name: cinder-csi-pdb

View file

@ -3,8 +3,14 @@ upcloud_csi_controller_replicas: 1
upcloud_csi_provisioner_image_tag: "v3.1.0" upcloud_csi_provisioner_image_tag: "v3.1.0"
upcloud_csi_attacher_image_tag: "v3.4.0" upcloud_csi_attacher_image_tag: "v3.4.0"
upcloud_csi_resizer_image_tag: "v1.4.0" upcloud_csi_resizer_image_tag: "v1.4.0"
upcloud_csi_plugin_image_tag: "v0.2.1" upcloud_csi_plugin_image_tag: "v0.3.3"
upcloud_csi_node_image_tag: "v2.5.0" upcloud_csi_node_image_tag: "v2.5.0"
upcloud_username: "{{ lookup('env','UPCLOUD_USERNAME') }}" upcloud_username: "{{ lookup('env','UPCLOUD_USERNAME') }}"
upcloud_password: "{{ lookup('env','UPCLOUD_PASSWORD') }}" upcloud_password: "{{ lookup('env','UPCLOUD_PASSWORD') }}"
upcloud_tolerations: [] upcloud_tolerations: []
upcloud_csi_enable_volume_snapshot: false
upcloud_csi_snapshot_controller_replicas: 2
upcloud_csi_snapshotter_image_tag: "v4.2.1"
upcloud_csi_snapshot_controller_image_tag: "v4.2.1"
upcloud_csi_snapshot_validation_webhook_image_tag: "v4.2.1"
upcloud_cacert: "{{ lookup('env','OS_CACERT') }}"
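A minimal group_vars sketch for switching on the new snapshot support added above (assuming the UpCloud CSI addon itself is already enabled for the cluster):

  upcloud_csi_enable_volume_snapshot: true
  # optional: scale the snapshot controller; the default is shown in the file above
  upcloud_csi_snapshot_controller_replicas: 2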

Some files were not shown because too many files have changed in this diff