Compare commits

6 commits

Author SHA1 Message Date
Calin Cristian Andrei
2187882ee0 fix contrib/<file>.md errors identified by markdownlint 2022-08-07 13:47:51 +00:00
Calin Cristian Andrei
4a994c82d1 fix docs/<file>.md errors identified by markdownlint
* docs/azure-csi.md
* docs/azure.md
* docs/bootstrap-os.md
* docs/calico.md
* docs/debian.md
* docs/fcos.md
* docs/vagrant.md
* docs/gcp-lb.md
* docs/kubernetes-apps/registry.md
* docs/setting-up-your-first-cluster.md
* docs/vagrant.md
* docs/vars.md
2022-08-07 12:41:09 +00:00
Calin Cristian Andrei
b074b91ee9 fix docs/integration.md errors identified by markdownlint 2022-08-07 12:13:18 +00:00
Calin Cristian Andrei
b3f7be7135 describe the use of pre-commit hook in CONTRIBUTING.md 2022-08-07 11:58:47 +00:00
Calin Cristian Andrei
d4082da97f add tmp.md to .gitignore 2022-08-07 11:23:43 +00:00
Calin Cristian Andrei
faecc7420d add pre-commit hook configuration 2022-08-07 11:21:50 +00:00
303 changed files with 4043 additions and 7592 deletions

.gitignore

@ -20,13 +20,11 @@ contrib/terraform/aws/credentials.tfvars
*~ *~
vagrant/ vagrant/
plugins/mitogen plugins/mitogen
deploy.sh
# Ansible inventory # Ansible inventory
inventory/* inventory/*
!inventory/local !inventory/local
!inventory/sample !inventory/sample
!inventory/c12s-sample
inventory/*/artifacts/ inventory/*/artifacts/
# Byte-compiled / optimized / DLL files # Byte-compiled / optimized / DLL files

@ -8,7 +8,7 @@ stages:
- deploy-special - deploy-special
variables: variables:
KUBESPRAY_VERSION: v2.20.0 KUBESPRAY_VERSION: v2.19.0
FAILFASTCI_NAMESPACE: 'kargo-ci' FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray' GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
ANSIBLE_FORCE_COLOR: "true" ANSIBLE_FORCE_COLOR: "true"

@ -75,13 +75,6 @@ check-readme-versions:
script: script:
- tests/scripts/check_readme_versions.sh - tests/scripts/check_readme_versions.sh
check-typo:
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/check_typo.sh
ci-matrix: ci-matrix:
stage: unit-tests stage: unit-tests
tags: [light] tags: [light]

@ -51,11 +51,6 @@ packet_ubuntu20-aio-docker:
extends: .packet_pr extends: .packet_pr
when: on_success when: on_success
packet_ubuntu20-calico-aio-hardening:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu18-calico-aio: packet_ubuntu18-calico-aio:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet_pr
@ -156,11 +151,6 @@ packet_rockylinux8-calico:
extends: .packet_pr extends: .packet_pr
when: on_success when: on_success
packet_rockylinux9-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_almalinux8-docker: packet_almalinux8-docker:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet_pr

@ -43,7 +43,6 @@ vagrant_ubuntu20-flannel:
stage: deploy-part2 stage: deploy-part2
extends: .vagrant extends: .vagrant
when: on_success when: on_success
allow_failure: false
vagrant_ubuntu16-kube-router-sep: vagrant_ubuntu16-kube-router-sep:
stage: deploy-part2 stage: deploy-part2

@ -38,7 +38,7 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster
1. Submit an issue describing your proposed change to the repo in question. 1. Submit an issue describing your proposed change to the repo in question.
2. The [repo owners](OWNERS) will respond to your issue promptly. 2. The [repo owners](OWNERS) will respond to your issue promptly.
3. Fork the desired repo, develop and test your code changes. 3. Fork the desired repo, develop and test your code changes.
4. Install [pre-commit](https://pre-commit.com) and install it in your development repo. 4. Install [pre-commit](https://pre-commit.com) and install it in your development repo).
5. Addess any pre-commit validation failures. 5. Addess any pre-commit validation failures.
6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>) 6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
7. Submit a pull request. 7. Submit a pull request.
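For reference, a minimal sequence covering steps 4 and 5 above (a sketch assuming a Python environment with `pip` available; the hooks that actually run come from the repository's pre-commit configuration):

```ShellSession
# install the pre-commit tool and register its git hook in your clone
pip install pre-commit
pre-commit install
# run every configured hook against the whole tree before pushing
pre-commit run --all-files
```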

@ -8,8 +8,6 @@ aliases:
- floryut - floryut
- oomichi - oomichi
- cristicalin - cristicalin
- liupeng0518
- yankay
kubespray-reviewers: kubespray-reviewers:
- holmsten - holmsten
- bozzo - bozzo
@ -18,7 +16,6 @@ aliases:
- jayonlau - jayonlau
- cristicalin - cristicalin
- liupeng0518 - liupeng0518
- yankay
kubespray-emeritus_approvers: kubespray-emeritus_approvers:
- riverzhang - riverzhang
- atoms - atoms

@ -57,11 +57,10 @@ A simple way to ensure you get all the correct version of Ansible is to use the
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this: You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
```ShellSession ```ShellSession
git checkout v2.20.0 docker pull quay.io/kubespray/kubespray:v2.19.0
docker pull quay.io/kubespray/kubespray:v2.20.0
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \ docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \ --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
quay.io/kubespray/kubespray:v2.20.0 bash quay.io/kubespray/kubespray:v2.19.0 bash
# Inside the container you may now run the kubespray playbooks: # Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
``` ```
@ -114,7 +113,6 @@ vagrant up
- [Air-Gap installation](docs/offline-environment.md) - [Air-Gap installation](docs/offline-environment.md)
- [NTP](docs/ntp.md) - [NTP](docs/ntp.md)
- [Hardening](docs/hardening.md) - [Hardening](docs/hardening.md)
- [Mirror](docs/mirror.md)
- [Roadmap](docs/roadmap.md) - [Roadmap](docs/roadmap.md)
## Supported Linux Distributions ## Supported Linux Distributions
@ -122,46 +120,44 @@ vagrant up
- **Flatcar Container Linux by Kinvolk** - **Flatcar Container Linux by Kinvolk**
- **Debian** Bullseye, Buster, Jessie, Stretch - **Debian** Bullseye, Buster, Jessie, Stretch
- **Ubuntu** 16.04, 18.04, 20.04, 22.04 - **Ubuntu** 16.04, 18.04, 20.04, 22.04
- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8) - **CentOS/RHEL** 7, [8](docs/centos.md#centos-8)
- **Fedora** 35, 36 - **Fedora** 35, 36
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md)) - **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed - **openSUSE** Leap 15.x/Tumbleweed
- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8) - **Oracle Linux** 7, [8](docs/centos.md#centos-8)
- **Alma Linux** [8, 9](docs/centos.md#centos-8) - **Alma Linux** [8](docs/centos.md#centos-8)
- **Rocky Linux** [8, 9](docs/centos.md#centos-8) - **Rocky Linux** [8](docs/centos.md#centos-8)
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md)) - **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md)) - **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md))
- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md))
Note: Upstart/SysV init based OS types are not supported. Note: Upstart/SysV init based OS types are not supported.
## Supported Components ## Supported Components
- Core - Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.5 - [kubernetes](https://github.com/kubernetes/kubernetes) v1.24.3
- [etcd](https://github.com/etcd-io/etcd) v3.5.6 - [etcd](https://github.com/etcd-io/etcd) v3.5.4
- [docker](https://www.docker.com/) v20.10 (see note) - [docker](https://www.docker.com/) v20.10 (see note)
- [containerd](https://containerd.io/) v1.6.14 - [containerd](https://containerd.io/) v1.6.6
- [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS) - [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin - Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1 - [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
- [calico](https://github.com/projectcalico/calico) v3.24.5 - [calico](https://github.com/projectcalico/calico) v3.23.3
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions) - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.12.1 - [cilium](https://github.com/cilium/cilium) v1.11.7
- [flannel](https://github.com/flannel-io/flannel) v0.19.2 - [flannel](https://github.com/flannel-io/flannel) v0.18.1
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7 - [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.7
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1 - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
- [multus](https://github.com/intel/multus-cni) v3.8 - [multus](https://github.com/intel/multus-cni) v3.8
- [weave](https://github.com/weaveworks/weave) v2.8.1 - [weave](https://github.com/weaveworks/weave) v2.8.1
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.5 - [kube-vip](https://github.com/kube-vip/kube-vip) v0.4.2
- Application - Application
- [cert-manager](https://github.com/jetstack/cert-manager) v1.10.1 - [cert-manager](https://github.com/jetstack/cert-manager) v1.9.0
- [coredns](https://github.com/coredns/coredns) v1.9.3 - [coredns](https://github.com/coredns/coredns) v1.8.6
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.5.1 - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.3.0
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.3 - [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
- [argocd](https://argoproj.github.io/) v2.4.16 - [argocd](https://argoproj.github.io/) v2.4.7
- [helm](https://helm.sh/) v3.9.4 - [helm](https://helm.sh/) v3.9.2
- [metallb](https://metallb.universe.tf/) v0.12.1 - [metallb](https://metallb.universe.tf/) v0.12.1
- [registry](https://github.com/distribution/distribution) v2.8.1 - [registry](https://github.com/distribution/distribution) v2.8.1
- Storage Plugin - Storage Plugin
@ -172,7 +168,7 @@ Note: Upstart/SysV init based OS types are not supported.
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0 - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0 - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22 - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0 - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.4.0
## Container Runtime Notes ## Container Runtime Notes
@ -181,7 +177,7 @@ Note: Upstart/SysV init based OS types are not supported.
## Requirements ## Requirements
- **Minimum required version of Kubernetes is v1.23** - **Minimum required version of Kubernetes is v1.22**
- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands** - **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md)) - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding**. - The target servers are configured to allow **IPv4 forwarding**.
@ -250,7 +246,6 @@ See also [Network checker](docs/netcheck.md).
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst) - [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform) - [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
- [Kubean](https://github.com/kubean-io/kubean)
## CI Tests ## CI Tests

@ -9,7 +9,5 @@
# #
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/ # INSTRUCTIONS AT https://kubernetes.io/security/
atoms
mattymo mattymo
floryut
oomichi
cristicalin

Vagrantfile

@ -31,7 +31,7 @@ SUPPORTED_OS = {
"rockylinux8" => {box: "generic/rocky8", user: "vagrant"}, "rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
"fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"}, "fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
"fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"}, "fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
"opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"}, "opensuse" => {box: "opensuse/Leap-15.3.x86_64", user: "vagrant"},
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"}, "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"}, "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"}, "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},

@ -4,7 +4,7 @@
become: no become: no
vars: vars:
minimal_ansible_version: 2.11.0 minimal_ansible_version: 2.11.0
maximal_ansible_version: 2.14.0 maximal_ansible_version: 2.13.0
ansible_connection: local ansible_connection: local
tags: always tags: always
tasks: tasks:
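A hedged sketch of how a version bound like this is typically enforced in an Ansible pre-flight task (illustrative only, not the exact Kubespray task; it assumes the `minimal_ansible_version` and `maximal_ansible_version` variables shown above):

```yaml
- name: Check that the installed Ansible version is inside the supported range
  assert:
    that:
      - ansible_version.string is version(minimal_ansible_version, ">=")
      - ansible_version.string is version(maximal_ansible_version, "<")
    msg: "Ansible must be >= {{ minimal_ansible_version }} and < {{ maximal_ansible_version }}"
```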

@ -35,7 +35,7 @@
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine } - { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
- { role: download, tags: download, when: "not skip_downloads" } - { role: download, tags: download, when: "not skip_downloads" }
- hosts: etcd:kube_control_plane - hosts: etcd
gather_facts: False gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}" environment: "{{ proxy_disable_env }}"
@ -59,10 +59,7 @@
vars: vars:
etcd_cluster_setup: false etcd_cluster_setup: false
etcd_events_cluster_setup: false etcd_events_cluster_setup: false
when: when: etcd_deployment_type != "kubeadm"
- etcd_deployment_type != "kubeadm"
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- hosts: k8s_cluster - hosts: k8s_cluster
gather_facts: False gather_facts: False

@ -13,7 +13,7 @@
# under the License. # under the License.
import inventory import inventory
from io import StringIO from test import support
import unittest import unittest
from unittest import mock from unittest import mock
@ -41,7 +41,7 @@ class TestInventoryPrintHostnames(unittest.TestCase):
'access_ip': '10.90.0.3'}}}}) 'access_ip': '10.90.0.3'}}}})
with mock.patch('builtins.open', mock_io): with mock.patch('builtins.open', mock_io):
with self.assertRaises(SystemExit) as cm: with self.assertRaises(SystemExit) as cm:
with mock.patch('sys.stdout', new_callable=StringIO) as stdout: with support.captured_stdout() as stdout:
inventory.KubesprayInventory( inventory.KubesprayInventory(
changed_hosts=["print_hostnames"], changed_hosts=["print_hostnames"],
config_file="file") config_file="file")
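The stdout-capture pattern used here can be reproduced standalone; a minimal sketch (independent of the inventory module) of capturing printed output with `unittest.mock` instead of the internal `test.support` helper:

```python
from io import StringIO
from unittest import mock

def greet():
    print("hello")

# redirect sys.stdout to an in-memory buffer for the duration of the call
with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
    greet()
assert stdout.getvalue() == "hello\n"
```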

@ -4,7 +4,7 @@ module "kubernetes" {
source = "./modules/kubernetes-cluster" source = "./modules/kubernetes-cluster"
prefix = var.prefix prefix = var.prefix
zone = var.zone
machines = var.machines machines = var.machines
ssh_public_keys = var.ssh_public_keys ssh_public_keys = var.ssh_public_keys

@ -2,18 +2,18 @@
${connection_strings_master} ${connection_strings_master}
${connection_strings_worker} ${connection_strings_worker}
[kube_control_plane] [kube-master]
${list_master} ${list_master}
[etcd] [etcd]
${list_master} ${list_master}
[kube_node] [kube-node]
${list_worker} ${list_worker}
[k8s_cluster:children] [k8s-cluster:children]
kube-master kube-master
kube-node kube-node
[k8s_cluster:vars] [k8s-cluster:vars]
network_id=${network_id} network_id=${network_id}
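For orientation, the underscore-style group names correspond to the layout Kubespray's sample inventory uses (a reference sketch with placeholder hosts, not the rendered output of this template):

```ini
[kube_control_plane]
master-0

[etcd]
master-0

[kube_node]
worker-0

[k8s_cluster:children]
kube_control_plane
kube_node
```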

@ -270,7 +270,6 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. | |`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
|`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default | |`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default | |`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
|`bastion_allowed_ports` | List of ports to open on bastion node, `[]` by default |
|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default | |`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default | |`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default | |`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
@ -295,8 +294,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
Allows a custom definition of worker nodes giving the operator full control over individual node flavor and Allows a custom definition of worker nodes giving the operator full control over individual node flavor and
availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and
`number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration `number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration
using the `k8s_nodes` variable. The `az`, `flavor` and `floating_ip` parameters are mandatory. using the `k8s_nodes` variable.
The optional parameter `extra_groups` (a comma-delimited string) can be used to define extra inventory group memberships for specific nodes.
For example: For example:
@ -316,7 +314,6 @@ k8s_nodes = {
"az" = "sto3" "az" = "sto3"
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445" "flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
"floating_ip" = true "floating_ip" = true
"extra_groups" = "calico_rr"
} }
} }
``` ```
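A fuller `cluster.tfvars` sketch of the custom worker-node mode described above (availability zone names and flavor IDs are placeholders; `extra_groups` follows the optional parameter mentioned in the text):

```hcl
number_of_k8s_nodes                = 0
number_of_k8s_nodes_no_floating_ip = 0

k8s_nodes = {
  "1" = {
    "az"           = "nova"
    "flavor"       = "<flavor-uuid>"
    "floating_ip"  = true
    "extra_groups" = "calico_rr"
  }
  "2" = {
    "az"          = "nova"
    "flavor"      = "<flavor-uuid>"
    "floating_ip" = false
  }
}
```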

@ -84,7 +84,6 @@ module "compute" {
supplementary_node_groups = var.supplementary_node_groups supplementary_node_groups = var.supplementary_node_groups
master_allowed_ports = var.master_allowed_ports master_allowed_ports = var.master_allowed_ports
worker_allowed_ports = var.worker_allowed_ports worker_allowed_ports = var.worker_allowed_ports
bastion_allowed_ports = var.bastion_allowed_ports
use_access_ip = var.use_access_ip use_access_ip = var.use_access_ip
master_server_group_policy = var.master_server_group_policy master_server_group_policy = var.master_server_group_policy
node_server_group_policy = var.node_server_group_policy node_server_group_policy = var.node_server_group_policy
@ -97,11 +96,6 @@ module "compute" {
network_router_id = module.network.router_id network_router_id = module.network.router_id
network_id = module.network.network_id network_id = module.network.network_id
use_existing_network = var.use_existing_network use_existing_network = var.use_existing_network
private_subnet_id = module.network.subnet_id
depends_on = [
module.network.subnet_id
]
} }
output "private_subnet_id" { output "private_subnet_id" {
@ -117,7 +111,7 @@ output "router_id" {
} }
output "k8s_master_fips" { output "k8s_master_fips" {
value = var.number_of_k8s_masters + var.number_of_k8s_masters_no_etcd > 0 ? concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips) : [for key, value in module.ips.k8s_masters_fips : value.address] value = concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips)
} }
output "k8s_node_fips" { output "k8s_node_fips" {

@ -15,11 +15,8 @@ data "openstack_images_image_v2" "image_master" {
name = var.image_master == "" ? var.image : var.image_master name = var.image_master == "" ? var.image : var.image_master
} }
data "cloudinit_config" "cloudinit" { data "template_file" "cloudinit" {
part { template = file("${path.module}/templates/cloudinit.yaml")
content_type = "text/cloud-config"
content = file("${path.module}/templates/cloudinit.yaml")
}
} }
data "openstack_networking_network_v2" "k8s_network" { data "openstack_networking_network_v2" "k8s_network" {
@ -85,17 +82,6 @@ resource "openstack_networking_secgroup_rule_v2" "bastion" {
security_group_id = openstack_networking_secgroup_v2.bastion[0].id security_group_id = openstack_networking_secgroup_v2.bastion[0].id
} }
resource "openstack_networking_secgroup_rule_v2" "k8s_bastion_ports" {
count = length(var.bastion_allowed_ports)
direction = "ingress"
ethertype = "IPv4"
protocol = lookup(var.bastion_allowed_ports[count.index], "protocol", "tcp")
port_range_min = lookup(var.bastion_allowed_ports[count.index], "port_range_min")
port_range_max = lookup(var.bastion_allowed_ports[count.index], "port_range_max")
remote_ip_prefix = lookup(var.bastion_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")
security_group_id = openstack_networking_secgroup_v2.bastion[0].id
}
resource "openstack_networking_secgroup_v2" "k8s" { resource "openstack_networking_secgroup_v2" "k8s" {
name = "${var.cluster_name}-k8s" name = "${var.cluster_name}-k8s"
description = "${var.cluster_name} - Kubernetes" description = "${var.cluster_name} - Kubernetes"
@ -209,9 +195,6 @@ resource "openstack_networking_port_v2" "bastion_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [ depends_on = [
var.network_router_id var.network_router_id
@ -224,7 +207,7 @@ resource "openstack_compute_instance_v2" "bastion" {
image_id = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null image_id = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
flavor_id = var.flavor_bastion flavor_id = var.flavor_bastion
key_pair = openstack_compute_keypair_v2.k8s.name key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered user_data = data.template_file.cloudinit.rendered
dynamic "block_device" { dynamic "block_device" {
for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : [] for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
@ -262,9 +245,6 @@ resource "openstack_networking_port_v2" "k8s_master_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [ depends_on = [
var.network_router_id var.network_router_id
@ -278,7 +258,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
flavor_id = var.flavor_k8s_master flavor_id = var.flavor_k8s_master
key_pair = openstack_compute_keypair_v2.k8s.name key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered user_data = data.template_file.cloudinit.rendered
dynamic "block_device" { dynamic "block_device" {
@ -325,9 +305,6 @@ resource "openstack_networking_port_v2" "k8s_masters_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [ depends_on = [
var.network_router_id var.network_router_id
@ -386,9 +363,6 @@ resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [ depends_on = [
var.network_router_id var.network_router_id
@ -402,7 +376,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
flavor_id = var.flavor_k8s_master flavor_id = var.flavor_k8s_master
key_pair = openstack_compute_keypair_v2.k8s.name key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered user_data = data.template_file.cloudinit.rendered
dynamic "block_device" { dynamic "block_device" {
@ -449,9 +423,6 @@ resource "openstack_networking_port_v2" "etcd_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [ depends_on = [
var.network_router_id var.network_router_id
@ -465,7 +436,7 @@ resource "openstack_compute_instance_v2" "etcd" {
image_id = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null image_id = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
flavor_id = var.flavor_etcd flavor_id = var.flavor_etcd
key_pair = openstack_compute_keypair_v2.k8s.name key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered user_data = data.template_file.cloudinit.rendered
dynamic "block_device" { dynamic "block_device" {
for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
@ -506,9 +477,6 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [ depends_on = [
var.network_router_id var.network_router_id
@ -563,9 +531,6 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port"
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [ depends_on = [
var.network_router_id var.network_router_id
@ -579,7 +544,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
flavor_id = var.flavor_k8s_master flavor_id = var.flavor_k8s_master
key_pair = openstack_compute_keypair_v2.k8s.name key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered user_data = data.template_file.cloudinit.rendered
dynamic "block_device" { dynamic "block_device" {
for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
@ -621,9 +586,6 @@ resource "openstack_networking_port_v2" "k8s_node_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [ depends_on = [
var.network_router_id var.network_router_id
@ -637,7 +599,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
flavor_id = var.flavor_k8s_node flavor_id = var.flavor_k8s_node
key_pair = openstack_compute_keypair_v2.k8s.name key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered user_data = data.template_file.cloudinit.rendered
dynamic "block_device" { dynamic "block_device" {
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : [] for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
@ -684,9 +646,6 @@ resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [ depends_on = [
var.network_router_id var.network_router_id
@ -700,7 +659,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
flavor_id = var.flavor_k8s_node flavor_id = var.flavor_k8s_node
key_pair = openstack_compute_keypair_v2.k8s.name key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered user_data = data.template_file.cloudinit.rendered
dynamic "block_device" { dynamic "block_device" {
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : [] for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
@ -742,9 +701,6 @@ resource "openstack_networking_port_v2" "k8s_nodes_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [ depends_on = [
var.network_router_id var.network_router_id
@ -758,7 +714,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
flavor_id = each.value.flavor flavor_id = each.value.flavor
key_pair = openstack_compute_keypair_v2.k8s.name key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered user_data = data.template_file.cloudinit.rendered
dynamic "block_device" { dynamic "block_device" {
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : [] for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
@ -786,7 +742,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
metadata = { metadata = {
ssh_user = var.ssh_user ssh_user = var.ssh_user
kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups},${try(each.value.extra_groups, "")}" kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
depends_on = var.network_router_id depends_on = var.network_router_id
use_access_ip = var.use_access_ip use_access_ip = var.use_access_ip
} }
@ -804,9 +760,6 @@ resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [ depends_on = [
var.network_router_id var.network_router_id

@ -136,10 +136,6 @@ variable "worker_allowed_ports" {
type = list type = list
} }
variable "bastion_allowed_ports" {
type = list
}
variable "use_access_ip" {} variable "use_access_ip" {}
variable "master_server_group_policy" { variable "master_server_group_policy" {
@ -189,7 +185,3 @@ variable "port_security_enabled" {
variable "force_null_port_security" { variable "force_null_port_security" {
type = bool type = bool
} }
variable "private_subnet_id" {
type = string
}

@ -257,12 +257,6 @@ variable "worker_allowed_ports" {
] ]
} }
variable "bastion_allowed_ports" {
type = list(any)
default = []
}
variable "use_access_ip" { variable "use_access_ip" {
default = 1 default = 1
} }

@ -251,8 +251,8 @@ resource "upcloud_firewall_rules" "master" {
content { content {
action = "accept" action = "accept"
comment = "UpCloud DNS" comment = "UpCloud DNS"
source_port_end = "53" destination_port_end = "53"
source_port_start = "53" destination_port_start = "53"
direction = "in" direction = "in"
family = "IPv4" family = "IPv4"
protocol = firewall_rule.value protocol = firewall_rule.value
@ -267,8 +267,8 @@ resource "upcloud_firewall_rules" "master" {
content { content {
action = "accept" action = "accept"
comment = "UpCloud DNS" comment = "UpCloud DNS"
source_port_end = "53" destination_port_end = "53"
source_port_start = "53" destination_port_start = "53"
direction = "in" direction = "in"
family = "IPv4" family = "IPv4"
protocol = firewall_rule.value protocol = firewall_rule.value
@ -283,8 +283,8 @@ resource "upcloud_firewall_rules" "master" {
content { content {
action = "accept" action = "accept"
comment = "UpCloud DNS" comment = "UpCloud DNS"
source_port_end = "53" destination_port_end = "53"
source_port_start = "53" destination_port_start = "53"
direction = "in" direction = "in"
family = "IPv6" family = "IPv6"
protocol = firewall_rule.value protocol = firewall_rule.value
@ -299,8 +299,8 @@ resource "upcloud_firewall_rules" "master" {
content { content {
action = "accept" action = "accept"
comment = "UpCloud DNS" comment = "UpCloud DNS"
source_port_end = "53" destination_port_end = "53"
source_port_start = "53" destination_port_start = "53"
direction = "in" direction = "in"
family = "IPv6" family = "IPv6"
protocol = firewall_rule.value protocol = firewall_rule.value
@ -315,8 +315,8 @@ resource "upcloud_firewall_rules" "master" {
content { content {
action = "accept" action = "accept"
comment = "NTP Port" comment = "NTP Port"
source_port_end = "123" destination_port_end = "123"
source_port_start = "123" destination_port_start = "123"
direction = "in" direction = "in"
family = "IPv4" family = "IPv4"
protocol = firewall_rule.value protocol = firewall_rule.value
@ -325,20 +325,6 @@ resource "upcloud_firewall_rules" "master" {
} }
} }
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["udp"] : []
content {
action = "accept"
comment = "NTP Port"
source_port_end = "123"
source_port_start = "123"
direction = "in"
family = "IPv6"
protocol = firewall_rule.value
}
}
firewall_rule { firewall_rule {
action = var.firewall_default_deny_in ? "drop" : "accept" action = var.firewall_default_deny_in ? "drop" : "accept"
direction = "in" direction = "in"
@ -408,8 +394,8 @@ resource "upcloud_firewall_rules" "k8s" {
content { content {
action = "accept" action = "accept"
comment = "UpCloud DNS" comment = "UpCloud DNS"
source_port_end = "53" destination_port_end = "53"
source_port_start = "53" destination_port_start = "53"
direction = "in" direction = "in"
family = "IPv4" family = "IPv4"
protocol = firewall_rule.value protocol = firewall_rule.value
@ -424,8 +410,8 @@ resource "upcloud_firewall_rules" "k8s" {
content { content {
action = "accept" action = "accept"
comment = "UpCloud DNS" comment = "UpCloud DNS"
source_port_end = "53" destination_port_end = "53"
source_port_start = "53" destination_port_start = "53"
direction = "in" direction = "in"
family = "IPv4" family = "IPv4"
protocol = firewall_rule.value protocol = firewall_rule.value
@ -440,8 +426,8 @@ resource "upcloud_firewall_rules" "k8s" {
content { content {
action = "accept" action = "accept"
comment = "UpCloud DNS" comment = "UpCloud DNS"
source_port_end = "53" destination_port_end = "53"
source_port_start = "53" destination_port_start = "53"
direction = "in" direction = "in"
family = "IPv6" family = "IPv6"
protocol = firewall_rule.value protocol = firewall_rule.value
@ -456,8 +442,8 @@ resource "upcloud_firewall_rules" "k8s" {
content { content {
action = "accept" action = "accept"
comment = "UpCloud DNS" comment = "UpCloud DNS"
source_port_end = "53" destination_port_end = "53"
source_port_start = "53" destination_port_start = "53"
direction = "in" direction = "in"
family = "IPv6" family = "IPv6"
protocol = firewall_rule.value protocol = firewall_rule.value
@ -472,8 +458,8 @@ resource "upcloud_firewall_rules" "k8s" {
content { content {
action = "accept" action = "accept"
comment = "NTP Port" comment = "NTP Port"
source_port_end = "123" destination_port_end = "123"
source_port_start = "123" destination_port_start = "123"
direction = "in" direction = "in"
family = "IPv4" family = "IPv4"
protocol = firewall_rule.value protocol = firewall_rule.value
@ -482,20 +468,6 @@ resource "upcloud_firewall_rules" "k8s" {
} }
} }
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["udp"] : []
content {
action = "accept"
comment = "NTP Port"
source_port_end = "123"
source_port_start = "123"
direction = "in"
family = "IPv6"
protocol = firewall_rule.value
}
}
firewall_rule { firewall_rule {
action = var.firewall_default_deny_in ? "drop" : "accept" action = var.firewall_default_deny_in ? "drop" : "accept"
direction = "in" direction = "in"

@ -1,13 +0,0 @@
#!/bin/bash
ansible-playbook -i inventory/${MY_INVENTORY}/inventory.ini --become --user=${MY_SSH_USER} --become-user=root cluster.yml -e etcd_retries=42
export KUBECONFIG=$(pwd)/inventory/c12s-sample/artifacts/admin.conf
echo
echo "execute the following in any shell where you want to connect to your cluster with kubectl : "
echo "export KUBECONFIG=$(pwd)/inventory/c12s-sample/artifacts/admin.conf"
kubectl create namespace infra
echo
echo ArgoCD admin password :
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d; echo

@ -37,8 +37,6 @@
* [CentOS/OracleLinux/AlmaLinux/Rocky Linux](docs/centos.md) * [CentOS/OracleLinux/AlmaLinux/Rocky Linux](docs/centos.md)
* [Kylin Linux Advanced Server V10](docs/kylinlinux.md) * [Kylin Linux Advanced Server V10](docs/kylinlinux.md)
* [Amazon Linux 2](docs/amazonlinux.md) * [Amazon Linux 2](docs/amazonlinux.md)
* [UOS Linux](docs/uoslinux.md)
* [openEuler notes](docs/openeuler.md))
* CRI * CRI
* [Containerd](docs/containerd.md) * [Containerd](docs/containerd.md)
* [Docker](docs/docker.md) * [Docker](docs/docker.md)

@ -3,7 +3,7 @@
## Installing Ansible ## Installing Ansible
Kubespray supports multiple ansible versions and ships different `requirements.txt` files for them. Kubespray supports multiple ansible versions and ships different `requirements.txt` files for them.
Depending on your available python version you may be limited in choosing which ansible version to use. Depending on your available python version you may be limited in chooding which ansible version to use.
It is recommended to deploy the ansible version used by kubespray into a python virtual environment. It is recommended to deploy the ansible version used by kubespray into a python virtual environment.
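A minimal sketch of that recommendation (the requirements file name follows the pattern the repository ships; pick the variant matching the ansible version you intend to use):

```ShellSession
python3 -m venv kubespray-venv
source kubespray-venv/bin/activate
pip install -U -r requirements.txt
```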
@ -281,7 +281,7 @@ For more information about Ansible and bastion hosts, read
## Mitogen ## Mitogen
Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for usage and reasons for deprecation. Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for useage and reasons for deprecation.
## Beyond ansible 2.9 ## Beyond ansible 2.9

@ -72,14 +72,9 @@ calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
In some cases you may want to route the pods subnet and so NAT is not needed on the nodes. In some cases you may want to route the pods subnet and so NAT is not needed on the nodes.
For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located. For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located.
The following variables need to be set as follow: The following variables need to be set:
`peer_with_router` to enable the peering with the datacenter's border router (default value: false).
```yml you'll need to edit the inventory and add a hostvar `local_as` by node.
peer_with_router: true # enable the peering with the datacenter's border router (default value: false).
nat_outgoing: false # (optional) NAT outgoing (default value: true).
```
And you'll need to edit the inventory and add a hostvar `local_as` by node.
```ShellSession ```ShellSession
node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
@ -176,8 +171,6 @@ node5
[rack0:vars] [rack0:vars]
cluster_id="1.0.0.1" cluster_id="1.0.0.1"
calico_rr_id=rr1
calico_group_id=rr1
``` ```
The inventory above will deploy the following topology assuming that calico's The inventory above will deploy the following topology assuming that calico's
@ -205,14 +198,6 @@ To re-define health host please set the following variable in your inventory:
calico_healthhost: "0.0.0.0" calico_healthhost: "0.0.0.0"
``` ```
### Optional : Configure VXLAN hardware Offload
Because of the Issue [projectcalico/calico#4727](https://github.com/projectcalico/calico/issues/4727), The VXLAN Offload is disable by default. It can be configured like this:
```yml
calico_feature_detect_override: "ChecksumOffloadBroken=true" # The vxlan offload will enabled with kernel version is > 5.7 (It may cause problem on buggy NIC driver)
```
### Optional : Configure Calico Node probe timeouts ### Optional : Configure Calico Node probe timeouts
Under certain conditions a deployer may need to tune the Calico liveness and readiness probes timeout settings. These can be configured like this: Under certain conditions a deployer may need to tune the Calico liveness and readiness probes timeout settings. These can be configured like this:
@ -226,7 +211,7 @@ calico_node_readinessprobe_timeout: 10
Calico supports two types of encapsulation: [VXLAN and IP in IP](https://docs.projectcalico.org/v3.11/networking/vxlan-ipip). VXLAN is the more mature implementation and enabled by default, please check your environment if you need *IP in IP* encapsulation. Calico supports two types of encapsulation: [VXLAN and IP in IP](https://docs.projectcalico.org/v3.11/networking/vxlan-ipip). VXLAN is the more mature implementation and enabled by default, please check your environment if you need *IP in IP* encapsulation.
*IP in IP* and *VXLAN* is mutually exclusive modes. *IP in IP* and *VXLAN* is mutualy exclusive modes.
Kubespray defaults have changed after version 2.18 from auto-enabling `ipip` mode to auto-enabling `vxlan`. This was done to facilitate wider deployment scenarios including those where vxlan acceleration is provided by the underlying network devices. Kubespray defaults have changed after version 2.18 from auto-enabling `ipip` mode to auto-enabling `vxlan`. This was done to facilitate wider deployment scenarios including those where vxlan acceleration is provided by the underlying network devices.
@ -259,14 +244,14 @@ calico_network_backend: 'bird'
If you would like to migrate from the old IP in IP with `bird` network backends default to the new VXLAN based encapsulation you need to perform this change before running an upgrade of your cluster; the `cluster.yml` and `upgrade-cluster.yml` playbooks will refuse to continue if they detect incompatible settings. If you would like to migrate from the old IP in IP with `bird` network backends default to the new VXLAN based encapsulation you need to perform this change before running an upgrade of your cluster; the `cluster.yml` and `upgrade-cluster.yml` playbooks will refuse to continue if they detect incompatible settings.
Execute the following steps on one of the control plane nodes, ensure the cluster in healthy before proceeding. Execute the following sters on one of the control plane nodes, ensure the cluster in healthy before proceeding.
```shell ```shell
calicoctl.sh patch felixconfig default -p '{"spec":{"vxlanEnabled":true}}' calicoctl.sh patch felixconfig default -p '{"spec":{"vxlanEnabled":true}}'
calicoctl.sh patch ippool default-pool -p '{"spec":{"ipipMode":"Never", "vxlanMode":"Always"}}' calicoctl.sh patch ippool default-pool -p '{"spec":{"ipipMode":"Never", "vxlanMode":"Always"}}'
``` ```
**Note:** if you created multiple ippools you will need to patch all of them individually to change their encapsulation. The kubespray playbooks only handle the default ippool created by kubespray. **Note:** if you created multiple ippools you will need to patch all of them individually to change their encapsulation. The kubespray playbooks only handle the default ippool creaded by kubespray.
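If you did create additional pools, each one takes the same patch as the default pool shown above; a sketch with an illustrative pool name:

```shell
calicoctl.sh get ippool -o wide
calicoctl.sh patch ippool my-extra-pool -p '{"spec":{"ipipMode":"Never", "vxlanMode":"Always"}}'
```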
Wait for the `vxlan.calico` interfaces to be created on all cluster nodes and traffic to be routed through it then you can disable `ipip`. Wait for the `vxlan.calico` interfaces to be created on all cluster nodes and traffic to be routed through it then you can disable `ipip`.
@ -383,7 +368,7 @@ use_localhost_as_kubeapi_loadbalancer: true
### Tunneled versus Direct Server Return ### Tunneled versus Direct Server Return
By default Calico uses Tunneled service mode but it can use direct server return (DSR) in order to optimize the return path for a service. By default Calico usese Tunneled service mode but it can use direct server return (DSR) in order to optimize the return path for a service.
To configure DSR: To configure DSR:
@ -409,7 +394,7 @@ Please see [Calico eBPF troubleshooting guide](https://docs.projectcalico.org/ma
## Wireguard Encryption ## Wireguard Encryption
Calico supports using Wireguard for encryption. Please see the docs on [encrypt cluster pod traffic](https://docs.projectcalico.org/security/encrypt-cluster-pod-traffic). Calico supports using Wireguard for encryption. Please see the docs on [encryptiong cluster pod traffic](https://docs.projectcalico.org/security/encrypt-cluster-pod-traffic).
To enable wireguard support: To enable wireguard support:

@ -2,12 +2,12 @@
## CentOS 7 ## CentOS 7
The maximum python version officially supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above. The maximum python version offically supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above.
Kubespray supports multiple ansible versions but only the default (5.x) gets wide testing coverage. If your deployment host is CentOS 7 it is recommended to use one of the earlier versions still supported. Kubespray supports multiple ansible versions but only the default (5.x) gets wide testing coverage. If your deployment host is CentOS 7 it is recommended to use one of the earlier versions still supported.
## CentOS 8 ## CentOS 8
CentOS 8 / Oracle Linux 8,9 / AlmaLinux 8,9 / Rocky Linux 8,9 ship only with iptables-nft (ie without iptables-legacy similar to RHEL8) CentOS 8 / Oracle Linux 8 / AlmaLinux 8 / Rocky Linux 8 ship only with iptables-nft (ie without iptables-legacy similar to RHEL8)
The only tested configuration for now is using Calico CNI The only tested configuration for now is using Calico CNI
You need to add `calico_iptables_backend: "NFT"` to your configuration. You need to add `calico_iptables_backend: "NFT"` to your configuration.
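A sketch of where that override typically lives (the group_vars path follows the sample-inventory convention and is not mandated by the doc):

```yml
# inventory/mycluster/group_vars/k8s_cluster/k8s-net-calico.yml
calico_iptables_backend: "NFT"
```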

@ -16,7 +16,6 @@ fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x
fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
@ -36,7 +35,6 @@ fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@ -56,7 +54,6 @@ fedora35 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
opensuse | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | opensuse | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

@ -56,7 +56,7 @@ cilium_operator_extra_volume_mounts:
## Choose Cilium version ## Choose Cilium version
```yml ```yml
cilium_version: v1.12.1 cilium_version: v1.11.3
``` ```
## Add variable to config ## Add variable to config
@ -121,23 +121,6 @@ cilium_encryption_type: "wireguard"
Kubespray currently supports Linux distributions with Wireguard Kernel mode on Linux 5.6 and newer. Kubespray currently supports Linux distributions with Wireguard Kernel mode on Linux 5.6 and newer.
## Bandwidth Manager
Cilium's bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.
Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies:
if an L7 policy selects a Pod at egress, bandwidth enforcement is disabled for that Pod.
Bandwidth Manager requires a v5.1.x or more recent Linux kernel.
For further information, make sure to check the official [Cilium documentation](https://docs.cilium.io/en/v1.12/gettingstarted/bandwidth-manager/).
To use this function, set the following parameters:
```yml
cilium_enable_bandwidth_manager: true
```
## Install Cilium Hubble
k8s-net-cilium.yml:
@ -170,32 +153,3 @@ cilium_hubble_metrics:
```
[More](https://docs.cilium.io/en/v1.9/operations/metrics/#hubble-exported-metrics)
## Upgrade considerations
### Rolling-restart timeouts
Cilium relies on the kernel's BPF support, which is extremely fast at runtime but incurs a compilation penalty on initialization and update.
As a result, the Cilium DaemonSet pods can take a significant time to start, which scales with the number of nodes and endpoints in your cluster.
As part of cluster.yml, this DaemonSet is restarted, and Kubespray's [default timeouts for this operation](../roles/network_plugin/cilium/defaults/main.yml)
are not appropriate for large clusters.
This means that you will likely want to update these timeouts to a value more in-line with your cluster's number of nodes and their respective CPU performance.
This is configured by the following values:
```yaml
# Configure how long to wait for the Cilium DaemonSet to be ready again
cilium_rolling_restart_wait_retries_count: 30
cilium_rolling_restart_wait_retries_delay_seconds: 10
```
The total time allowed (count * delay) should be at least `($number_of_nodes_in_cluster * $cilium_pod_start_time)` for successful rolling updates. There are no
drawbacks to making it higher and giving yourself a time buffer to accommodate transient slowdowns.
Note: To find the `$cilium_pod_start_time` for your cluster, you can simply restart a Cilium pod on a node of your choice and look at how long it takes for it
to become ready.
Note 2: The default CPU requests/limits for Cilium pods is set to a very conservative 100m:500m which will likely yield very slow startup for Cilium pods. You
probably want to significantly increase the CPU limit specifically if short bursts of CPU from Cilium are acceptable to you.
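As a rough sketch, a large cluster might combine a longer restart window with a higher CPU ceiling for the agent pods (the CPU variable names below are assumed from the Cilium role defaults, and all values are only illustrative):

```yaml
# Give the DaemonSet up to 100 * 30s = 50 minutes to become ready again
cilium_rolling_restart_wait_retries_count: 100
cilium_rolling_restart_wait_retries_delay_seconds: 30

# Allow short CPU bursts so BPF compilation at startup is not throttled
cilium_cpu_requests: 300m
cilium_cpu_limits: "2"
```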
@ -39,68 +39,4 @@ containerd_registries:
image_command_tool: crictl
```
### Containerd Runtimes
Containerd supports multiple runtime configurations that can be used with
[RuntimeClass] Kubernetes feature. See [runtime classes in containerd] for the
details of containerd configuration.
In kubespray, the default runtime name is "runc", and it can be configured with the `containerd_runc_runtime` dictionary:
```yaml
containerd_runc_runtime:
name: runc
type: "io.containerd.runc.v2"
engine: ""
root: ""
options:
systemdCgroup: "false"
binaryName: /usr/local/bin/my-runc
base_runtime_spec: cri-base.json
```
Further runtimes can be configured with `containerd_additional_runtimes`, which
is a list of such dictionaries.
Default runtime can be changed by setting `containerd_default_runtime`.
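For instance, a minimal sketch that registers a second runtime while keeping runc as the default could look like this (it assumes a `crun` binary is already installed on the nodes; pods would then opt in through a RuntimeClass whose handler matches the runtime name):

```yaml
containerd_additional_runtimes:
  - name: crun
    type: "io.containerd.runc.v2"
    engine: ""
    root: ""
containerd_default_runtime: "runc"
```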
#### Base runtime specs and limiting number of open files
`base_runtime_spec` key in a runtime dictionary is used to explicitly
specify a runtime spec json file. `runc` runtime has it set to `cri-base.json`,
which is generated with `ctr oci spec > /etc/containerd/cri-base.json` and
updated to include a custom setting for maximum number of file descriptors per
container.
You can change maximum number of file descriptors per container for the default
`runc` runtime by setting the `containerd_base_runtime_spec_rlimit_nofile`
variable.
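For example, raising the limit only requires overriding that variable in your inventory (the value below is just an illustrative assumption):

```yaml
containerd_base_runtime_spec_rlimit_nofile: 65535
```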
You can tune many more [settings][runtime-spec] by supplying your own file name and content with `containerd_base_runtime_specs`:
```yaml
containerd_base_runtime_specs:
cri-spec-custom.json: |
{
"ociVersion": "1.0.2-dev",
"process": {
"user": {
"uid": 0,
...
```
The files in this dict will be placed in containerd config directory,
`/etc/containerd` by default. The files can then be referenced by filename in a
runtime:
```yaml
containerd_runc_runtime:
name: runc
base_runtime_spec: cri-spec-custom.json
...
```
[containerd]: https://containerd.io/
[RuntimeClass]: https://kubernetes.io/docs/concepts/containers/runtime-class/
[runtime classes in containerd]: https://github.com/containerd/containerd/blob/main/docs/cri/config.md#runtime-classes
[runtime-spec]: https://github.com/opencontainers/runtime-spec
@ -19,14 +19,6 @@ ndots value to be used in ``/etc/resolv.conf``
It is important to note that multiple search domains combined with high ``ndots``
values lead to poor performance of the DNS stack, so please choose them wisely.
## dns_timeout
timeout value to be used in ``/etc/resolv.conf``
## dns_attempts
attempts value to be used in ``/etc/resolv.conf``
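Both map to the corresponding ``options`` entries in ``/etc/resolv.conf``; a small sketch with illustrative values:

```yaml
dns_timeout: 2
dns_attempts: 2
```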
### searchdomains
Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
@ -34,8 +26,6 @@ Custom search domains to be added in addition to the cluster search domains (``d
Most Linux systems limit the total number of search domains to 6 and the total length of all search domains
to 256 characters. Depending on the length of ``dns_domain``, you're limited to less than the total limit.
`remove_default_searchdomains: true` will remove the default cluster search domains.
Please note that ``resolvconf_mode: docker_dns`` will automatically add your system's search domains as
additional search domains. Please take this into account for the limits.
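A minimal sketch (the domains are hypothetical):

```yaml
searchdomains:
  - corp.example.com
  - infra.example.com
# reclaim the two slots used by the default cluster search domains
remove_default_searchdomains: true
```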
@ -50,12 +40,6 @@ is not set, a default resolver is chosen (depending on cloud provider or 8.8.8.8
DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup
DNS servers in early cluster deployment when no cluster DNS is available yet.
### dns_upstream_forward_extra_opts
Whether upstream DNS servers come from the `upstream_dns_servers` variable or from /etc/resolv.conf, the related forward block in the coredns (and nodelocaldns) configuration can take options (see <https://coredns.io/plugins/forward/> for details).
These are configurable in the inventory as a dictionary in the `dns_upstream_forward_extra_opts` variable.
By default, no options are added beyond the hardcoded ones (see `roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2` and `roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2`).
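A small sketch, assuming you want to raise forwarding concurrency and shorten upstream connection reuse (the option names come from the CoreDNS forward plugin; the values are illustrative):

```yaml
dns_upstream_forward_extra_opts:
  max_concurrent: 1000
  expire: 10s
```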
### coredns_external_zones
Array of optional external zones to coredns forward queries to. It's injected into
@ -78,13 +62,6 @@ coredns_external_zones:
nameservers:
- 192.168.0.53
cache: 0
- zones:
- mydomain.tld
nameservers:
- 10.233.0.3
cache: 5
rewrite:
- name stop website.tld website.namespace.svc.cluster.local
```
or as INI
@ -230,7 +207,7 @@ cluster service names.
Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames (cluster.local suffix by default).
More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/1024-nodelocal-cache-dns/README.md).
**As per the 2.10 release, Nodelocal DNS cache is enabled by default.**
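A minimal sketch of the relevant inventory settings (the usual Kubespray variable names; the listen address shown is the project's customary link-local default and is included only as an assumption):

```yaml
enable_nodelocaldns: true
# link-local IP the per-node caching agent listens on
nodelocaldns_ip: 169.254.25.10
```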
@ -286,8 +263,7 @@ nodelocaldns_secondary_skew_seconds: 5
* the ``searchdomains`` have a limitation of 6 names and 256 chars in
length. Due to the default ``svc, default.svc`` subdomains, the actual
limits are 4 names and 239 chars respectively. If `remove_default_searchdomains: true`
is added you are back to 6 names.
* the ``nameservers`` have a limitation of 3 servers, although there
is a way to mitigate that with the ``upstream_dns_servers``,
@ -2,8 +2,6 @@
Flannel is a network fabric for containers, designed for Kubernetes
Supported [backends](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#wireguard): `vxlan`, `host-gw` and `wireguard`
**Warning:** You may encounter this [bug](https://github.com/coreos/flannel/pull/1282) with the `VXLAN` backend; while waiting on a newer Flannel version, the current workaround (`ethtool --offload flannel.1 rx off tx off`) is showcased in the kubespray [networking test](tests/testcases/040_check-network-adv.yml:31).
## Verifying flannel install
@ -4,14 +4,14 @@ Google Cloud Platform can be used for creation of Kubernetes Service Load Balanc
This feature is delivered by adding parameters to `kube-controller-manager` and `kubelet`. You need to specify:
```ShellSession
--cloud-provider=gce
--cloud-config=/etc/kubernetes/cloud-config
```
To get it working in kubespray, you need to add a tag to the GCE instances and specify it in the kubespray group vars, and also set `cloud_provider` to `gce`. So for example, in the file `group_vars/all/gcp.yml`:
```yaml
cloud_provider: gce
gce_node_tags: k8s-lb
```
@ -41,18 +41,7 @@ kube_encrypt_secret_data: true
kube_encryption_resources: [secrets]
kube_encryption_algorithm: "secretbox"
kube_apiserver_enable_admission_plugins:
- EventRateLimit
- AlwaysPullImages
- ServiceAccount
- NamespaceLifecycle
- NodeRestriction
- LimitRanger
- ResourceQuota
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- PodNodeSelector
- PodSecurity
kube_apiserver_admission_control_config_file: true
# EventRateLimit plugin configuration
kube_apiserver_admission_event_rate_limits:
@ -85,6 +74,7 @@ kube_kubeadm_scheduler_extra_args:
etcd_deployment_type: kubeadm
## kubelet
kubelet_authorization_mode_webhook: true
kubelet_authentication_token_webhook: true
kube_read_only_port: 0
kubelet_rotate_server_certificates: true
@ -95,22 +85,10 @@ kubelet_streaming_connection_idle_timeout: "5m"
kubelet_make_iptables_util_chains: true
kubelet_feature_gates: ["RotateKubeletServerCertificate=true","SeccompDefault=true"]
kubelet_seccomp_default: true
kubelet_systemd_hardening: true
# In case you have multiple interfaces in your
# control plane nodes and you want to specify the right
# IP addresses, kubelet_secure_addresses allows you
# to specify the IP from which the kubelet
# will receive the packets.
kubelet_secure_addresses: "192.168.10.110 192.168.10.111 192.168.10.112"
# additional configurations
kube_owner: root
kube_cert_group: root
# create a default Pod Security Configuration and deny running of insecure pods
# kube_system namespace is exempted by default
kube_pod_security_use_default: true
kube_pod_security_default_enforce: restricted
```
Let's take a deeper look at the resulting **kubernetes** configuration:
@ -120,8 +98,6 @@ Let's take a deep look to the resultant **kubernetes** configuration:
* The `encryption-provider-config` provides encryption at rest. This means that the `kube-apiserver` encrypts data before it is stored in `etcd`, so the data is completely unreadable from `etcd` (in case an attacker is able to exploit it).
* The `rotateCertificates` in `KubeletConfiguration` is set to `true` along with `serverTLSBootstrap`. This can be used as an alternative to the `tlsCertFile` and `tlsPrivateKeyFile` parameters. Additionally it automatically generates certificates by itself, but you need to approve them manually or at least use an operator to do this (for more details, please take a look here: <https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/>); see the example after this list.
* If you are installing **kubernetes** on an AppArmor-based OS (e.g. Debian/Ubuntu) you can enable the `AppArmor` feature gate by uncommenting the lines with the comment `# AppArmor-based OS` on top.
* The `kubelet_systemd_hardening`, both with `kubelet_secure_addresses` setup a minimal firewall on the system. To better understand how these variables work, here's an explanatory image:
![kubelet hardening](img/kubelet-hardening.png)
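As referenced above, approving the kubelet serving certificates is a manual `kubectl` step; a typical session looks like this (the CSR name is hypothetical):

```ShellSession
kubectl get csr
kubectl certificate approve csr-9wvgt
```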
Once you have the file properly filled, you can run the **Ansible** command to start the installation:
@ -2,14 +2,6 @@
kube-vip provides Kubernetes clusters with a virtual IP and load balancer for both the control plane (for building a highly-available cluster) and Kubernetes Services of type LoadBalancer without relying on any external hardware or software.
## Prerequisites
You have to configure `kube_proxy_strict_arp` when the kube_proxy_mode is `ipvs` and kube-vip ARP is enabled.
```yaml
kube_proxy_strict_arp: true
```
## Install
You have to explicitly enable the kube-vip extension:
@ -19,7 +11,7 @@ kube_vip_enabled: true
```
You also need to enable
[kube-vip as HA, Load Balancer, or both](https://kube-vip.io/docs/installation/static/#kube-vip-as-ha-load-balancer-or-both):
```yaml
# HA for control-plane, requires a VIP
@ -36,16 +28,16 @@ kube_vip_services_enabled: false
```
> Note: When using `kube-vip` as LoadBalancer for services,
[additional manual steps](https://kube-vip.io/docs/usage/cloud-provider/)
are needed.
If using [ARP mode](https://kube-vip.io/docs/installation/static/#arp) :
```yaml
kube_vip_arp_enabled: true
```
If using [BGP mode](https://kube-vip.io/docs/installation/static/#bgp) :
```yaml
kube_vip_bgp_enabled: true
@ -1,11 +1,10 @@
# Local Static Storage Provisioner
The [local static storage provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner)
is NOT a dynamic storage provisioner as you would
expect from a cloud provider. Instead, it simply creates PersistentVolumes for
all mounts under the `host_dir` of the specified storage class.
These storage classes are specified in the `local_volume_provisioner_storage_classes` nested dictionary.
Example:
```yaml
@ -23,12 +22,9 @@ local_volume_provisioner_storage_classes:
fs_type: ext4
```
For each key in `local_volume_provisioner_storage_classes` a "storage class" with
the same name is created in the entry `storageClassMap` of the ConfigMap `local-volume-provisioner`.
The subkeys of each storage class in `local_volume_provisioner_storage_classes`
are converted to camelCase and added as attributes to the storage class in the
ConfigMap.
The result of the above example is:
```yaml
@ -47,22 +43,12 @@ data:
fsType: ext4
```
Additionally, a StorageClass object (`storageclasses.storage.k8s.io`) is also
created for each storage class:
```bash
$ kubectl get storageclasses.storage.k8s.io
NAME PROVISIONER RECLAIMPOLICY
fast-disks kubernetes.io/no-provisioner Delete
local-storage kubernetes.io/no-provisioner Delete
```
The default StorageClass is `local-storage` on `/mnt/disks`;
the rest of this documentation will use that path as an example.
## Examples to create local storage volumes
1. Using tmpfs
``` bash
for vol in vol1 vol2 vol3; do
@ -71,7 +57,7 @@ the rest of this documentation will use that path as an example.
done
```
The tmpfs method is not recommended for production because the mounts are not
persistent and data will be deleted on reboot.
1. Mount physical disks
@ -93,16 +79,9 @@ the rest of this documentation will use that path as an example.
```
This saves time of precreating filesystems. Note that your storageclass must have
`volume_mode` set to `"Filesystem"` and `fs_type` defined. If either is not set, the
disk will be added as a raw block device.
1. PersistentVolumes with `volumeMode="Block"`
Just like above, you can create PersistentVolumes with volumeMode `Block`
by creating a symbolic link under discovery directory to the block device on
the node, if you set `volume_mode` to `"Block"`. This will create a volume
presented into a Pod as a block device, without any filesystem on it.
1. File-backed sparsefile method
``` bash
@ -117,15 +96,27 @@ the rest of this documentation will use that path as an example.
1. Simple directories
In a development environment, using `mount --bind` works also, but there is no capacity
management.
1. Block volumeMode PVs
Create a symbolic link under discovery directory to the block device on the node. To use
raw block devices in pods, volume_type should be set to "Block".
## Usage notes
Beta PV.NodeAffinity field is used by default. If running against an older K8s
version, the useAlphaAPI flag must be set in the configMap.
The volume provisioner cannot calculate volume sizes correctly, so you should
delete the daemonset pod on the relevant host after creating volumes. The pod
will be recreated and read the size correctly.
Make sure to make any mounts persist via `/etc/fstab` or with systemd mounts (for
Flatcar Container Linux or Fedora CoreOS). Pods with persistent volume claims will not be
able to start if the mounts become unavailable.
## Further reading
Refer to the upstream docs here: <https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner>
@ -2,7 +2,7 @@
MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation.
It allows you to create Kubernetes services of type "LoadBalancer" in clusters that don't run on a cloud provider, and thus cannot simply hook into 3rd party products to provide load-balancers.
The default operating mode of MetalLB is in ["Layer2"](https://metallb.universe.tf/concepts/layer2/) but it can also operate in ["BGP"](https://metallb.universe.tf/concepts/bgp/) mode.
## Prerequisites
@ -19,7 +19,6 @@ You have to explicitly enable the MetalLB extension and set an IP address range
```yaml
metallb_enabled: true
metallb_speaker_enabled: true
metallb_avoid_buggy_ips: true
metallb_ip_range:
- 10.5.0.0/16
```
@ -70,11 +69,10 @@ metallb_peers:
When using calico >= 3.18 you can replace MetalLB speaker by calico Service LoadBalancer IP advertisement.
See [calico service IPs advertisement documentation](https://docs.projectcalico.org/archive/v3.18/networking/advertise-service-ips#advertise-service-load-balancer-ip-addresses).
In this scenario you should disable the MetalLB speaker and configure the `calico_advertise_service_loadbalancer_ips` to match your `metallb_ip_range`
```yaml
metallb_speaker_enabled: false
metallb_avoid_buggy_ips: true
metallb_ip_range:
- 10.5.0.0/16
calico_advertise_service_loadbalancer_ips: "{{ metallb_ip_range }}"
@ -92,13 +90,11 @@ metallb_additional_address_pools:
- 10.6.0.0/16
protocol: "bgp"
auto_assign: false
avoid_buggy_ips: true
kube_service_pool_2:
ip_range:
- 10.10.0.0/16
protocol: "bgp"
auto_assign: false
avoid_buggy_ips: true
calico_advertise_service_loadbalancer_ips:
- 10.5.0.0/16
- 10.6.0.0/16
@ -1,66 +0,0 @@
# Public Download Mirror
The public mirror is useful for downloading public resources quickly in some areas of the world (such as China).
## Configuring Kubespray to use a mirror site
You can follow the [offline environment](offline-environment.md) guide to point the image/file download configuration at the public mirror site. If you want to download quickly in China, the configuration can look like this:
```shell
gcr_image_repo: "gcr.m.daocloud.io"
kube_image_repo: "k8s.m.daocloud.io"
docker_image_repo: "docker.m.daocloud.io"
quay_image_repo: "quay.m.daocloud.io"
github_image_repo: "ghcr.m.daocloud.io"
files_repo: "https://files.m.daocloud.io"
```
Use mirror sites only if you trust the provider. The Kubespray team cannot verify their reliability or security.
You can replace the `m.daocloud.io` with any site you want.
## Example Usage Full Steps
You can follow the full steps below to use Kubespray with a mirror. For example:
Install Ansible according to the Ansible installation guide, then run the following steps:
```shell
# Copy ``inventory/sample`` as ``inventory/mycluster``
cp -rfp inventory/sample inventory/mycluster
# Update Ansible inventory file with inventory builder
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
# Use the download mirror
cp inventory/mycluster/group_vars/all/offline.yml inventory/mycluster/group_vars/all/mirror.yml
sed -i -E '/# .*\{\{ files_repo/s/^# //g' inventory/mycluster/group_vars/all/mirror.yml
tee -a inventory/mycluster/group_vars/all/mirror.yml <<EOF
gcr_image_repo: "gcr.m.daocloud.io"
kube_image_repo: "k8s.m.daocloud.io"
docker_image_repo: "docker.m.daocloud.io"
quay_image_repo: "quay.m.daocloud.io"
github_image_repo: "ghcr.m.daocloud.io"
files_repo: "https://files.m.daocloud.io"
EOF
# Review and change parameters under ``inventory/mycluster/group_vars``
cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/,
# installing packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```
The above steps simply add the "Use the download mirror" step to the steps in the [README.md](../README.md).
## Community-run mirror sites
DaoCloud (China)
* [image-mirror](https://github.com/DaoCloud/public-image-mirror)
* [files-mirror](https://github.com/DaoCloud/public-binary-files-mirror)
@ -124,7 +124,7 @@ to
With the old node still in the inventory, run `remove-node.yml`. You need to pass `-e node=node-1` to the playbook to limit the execution to the node being removed.
If the node you want to remove is not online, you should add `reset_nodes=false` and `allow_ungraceful_removal=true` to your extra-vars.
### 3) Edit cluster-info configmap in kube-public namespace
`kubectl edit cm -n kube-public cluster-info`
@ -12,7 +12,7 @@ ntp_enabled: true
The NTP service will be enabled and will sync time automatically.
## Customize the NTP configuration file
In an air-gapped environment, the node cannot access an NTP server on the internet, so the node can use a customized NTP server by configuring the NTP file.
@ -26,15 +26,6 @@ ntp_servers:
- "3.your-ntp-server.org iburst" - "3.your-ntp-server.org iburst"
``` ```
## Setting the TimeZone
The timezone can also be set by `ntp_timezone`, e.g. "Etc/UTC" or "Asia/Shanghai". If not set, the timezone will not change.
```ShellSession
ntp_enabled: true
ntp_timezone: Etc/UTC
```
## Advanced Configuration
Enabling `tinker panic` is useful when running NTP in a VM environment to avoid clock drift on VMs. It only takes effect when `ntp_manage_config` is true.
@ -1,25 +1,12 @@
# Offline environment
In case your servers don't have access to the internet directly (for example
when deploying on premises with security constraints), you need to get the
following artifacts in advance from another environment that has access to the internet.
* Some static files (zips and binaries)
* OS packages (rpm/deb files)
* Container images used by Kubespray. Exhaustive list depends on your setup
* [Optional] Python packages used by Kubespray (only required if your OS doesn't provide all python packages/versions listed in `requirements.txt`)
* [Optional] Helm chart files (only required if `helm_enabled=true`)
Then you need to set up the following services in your offline environment:
* a HTTP reverse proxy/cache/mirror to serve some static files (zips and binaries)
* an internal Yum/Deb repository for OS packages
* an internal container image registry that needs to be populated with all container images used by Kubespray
* [Optional] an internal PyPi server for python packages used by Kubespray
* [Optional] an internal Helm registry for Helm chart files
You can get the artifact lists with the [generate_list.sh](/contrib/offline/generate_list.sh) script.
In addition, you can find some tools for offline deployment under [contrib/offline](/contrib/offline/README.md).
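A minimal sketch of how that script is typically used (the `temp/` output paths are an assumption about where the generated lists land):

```ShellSession
cd contrib/offline
bash generate_list.sh
# review what has to be mirrored
cat temp/files.list
cat temp/images.list
```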
## Configure Inventory
@ -36,7 +23,7 @@ kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"
# etcd is optional if you **DON'T** use etcd_deployment=host
etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
# If using Calico
@ -1,11 +0,0 @@
# OpenEuler
[OpenEuler](https://www.openeuler.org/en/) Linux is supported with docker and containerd runtimes.
**Note:** OpenEuler Linux is not currently covered in kubespray CI and
support for it is currently considered experimental.
At present, only `openEuler 22.03 LTS` has been adapted; it supports deployment on the aarch64 and x86_64 platforms.
There are no special considerations for using OpenEuler Linux as the target OS
for Kubespray deployments.
@ -34,6 +34,52 @@ Otherwise [cinder](https://wiki.openstack.org/wiki/Cinder) won't work as expecte
Unless you are using calico or kube-router you can now run the playbook.
**Additional step needed when using calico or kube-router:**
Being L3 CNIs, calico and kube-router do not encapsulate all packets with the hosts' ip addresses. Instead the packets will be routed with the pods' ip addresses directly.
OpenStack will filter and drop all packets from ips it does not know to prevent spoofing.
In order to make L3 CNIs work on OpenStack you will need to tell OpenStack to allow pods' packets by allowing the network they use.
First you will need the ids of your OpenStack instances that will run kubernetes:
```bash
openstack server list --project YOUR_PROJECT
+--------------------------------------+--------+----------------------------------+--------+-------------+
| ID | Name | Tenant ID | Status | Power State |
+--------------------------------------+--------+----------------------------------+--------+-------------+
| e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
| 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
```
Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (though they are now configured through OpenStack):
```bash
openstack port list -c id -c device_id --project YOUR_PROJECT
+--------------------------------------+--------------------------------------+
| id | device_id |
+--------------------------------------+--------------------------------------+
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
```
Given the port ids on the left, you can set the two `allowed-address`(es) in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`.)
```bash
# allow kube_service_addresses and kube_pods_subnet network
openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
```
If all the VMs in the tenant correspond to the Kubespray deployment, you can "sweep run" the above with:
```bash
openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
```
Now you can finally run the playbook.
## The external cloud provider
The in-tree cloud provider is deprecated and will be removed in a future version of Kubernetes. The target release for removing all remaining in-tree cloud providers is set to 1.21.
@ -110,49 +156,3 @@ The new cloud provider is configured to have Octavia by default in Kubespray.
- Run `source path/to/your/openstack-rc` to read your OpenStack credentials like `OS_AUTH_URL`, `OS_USERNAME`, `OS_PASSWORD`, etc. Those variables are used for accessing OpenStack from the external cloud provider.
- Run the `cluster.yml` playbook
## Additional step needed when using calico or kube-router
Being L3 CNIs, calico and kube-router do not encapsulate all packets with the hosts' ip addresses. Instead the packets will be routed with the pods' ip addresses directly.
OpenStack will filter and drop all packets from ips it does not know to prevent spoofing.
In order to make L3 CNIs work on OpenStack you will need to tell OpenStack to allow pods' packets by allowing the network they use.
First you will need the ids of your OpenStack instances that will run kubernetes:
```bash
openstack server list --project YOUR_PROJECT
+--------------------------------------+--------+----------------------------------+--------+-------------+
| ID | Name | Tenant ID | Status | Power State |
+--------------------------------------+--------+----------------------------------+--------+-------------+
| e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
| 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
```
Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (though they are now configured through OpenStack):
```bash
openstack port list -c id -c device_id --project YOUR_PROJECT
+--------------------------------------+--------------------------------------+
| id | device_id |
+--------------------------------------+--------------------------------------+
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
```
Given the port ids on the left, you can set the two `allowed-address`(es) in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`.)
```bash
# allow kube_service_addresses and kube_pods_subnet network
openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
```
If all the VMs in the tenant correspond to the Kubespray deployment, you can "sweep run" the above with:
```bash
openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
```
Now you can finally run the playbook.
@ -14,8 +14,8 @@ hands-on guide to get started with Kubespray.
## Cluster Details
* [kubespray](https://github.com/kubernetes-sigs/kubespray)
* [kubernetes](https://github.com/kubernetes/kubernetes)
## Prerequisites
@ -466,7 +466,7 @@ kubectl logs $POD_NAME
#### Exec
In this section you will verify the ability to [execute commands in a container](https://kubernetes.io/docs/tasks/debug/debug-application/get-shell-running-container/#running-individual-commands-in-a-container).
Print the nginx version by executing the `nginx -v` command in the `nginx` container:
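For example, assuming `$POD_NAME` still holds the pod name captured earlier in the guide:

```ShellSession
kubectl exec -ti $POD_NAME -- nginx -v
```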
@ -1,9 +0,0 @@
# UOS Linux
UOS Linux(UnionTech OS Server 20) is supported with docker and containerd runtimes.
**Note:** UOS Linux is not currently covered in kubespray CI and
support for it is currently considered experimental.
There are no special considerations for using UOS Linux as the target OS
for Kubespray deployments.
@ -15,7 +15,7 @@ Some variables of note include:
* *calico_version* - Specify version of Calico to use
* *calico_cni_version* - Specify version of Calico CNI plugin to use
* *docker_version* - Specify version of Docker to use (should be quoted
string). Must match one of the keys defined for *docker_versioned_pkg*
in `roles/container-engine/docker/vars/*.yml`.
* *containerd_version* - Specify version of containerd to use when setting `container_manager` to `containerd`
@ -28,7 +28,6 @@ Some variables of note include:
* *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
* *kube_version* - Specify a given Kubernetes version
* *searchdomains* - Array of DNS domains to search when looking up hostnames
* *remove_default_searchdomains* - Boolean that removes the default searchdomain
* *nameservers* - Array of nameservers to use for DNS lookup
* *preinstall_selinux_state* - Set selinux state, permitted values are permissive, enforcing and disabled.
@ -167,9 +166,7 @@ variables to match your requirements.
addition to Kubespray deployed DNS
* *nameservers* - Array of DNS servers configured for use by hosts
* *searchdomains* - Array of up to 4 search domains
* *remove_default_searchdomains* - Boolean. If enabled, `searchdomains` variable can hold 6 search domains.
* *dns_etchosts* - Content of hosts file for coredns and nodelocaldns
* *dns_upstream_forward_extra_opts* - Options to add in the forward section of coredns/nodelocaldns related to upstream DNS servers
For more information, see [DNS
Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.md).
@ -178,47 +175,26 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
* *docker_options* - Commonly used to set
``--insecure-registry=myregistry.mydomain:5000``
* *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install.
* *containerd_default_runtime* - If defined, changes the default Containerd runtime used by the Kubernetes CRI plugin.
* *containerd_additional_runtimes* - Sets the additional Containerd runtimes used by the Kubernetes CRI plugin.
[Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overridden in inventory vars.
* *http_proxy/https_proxy/no_proxy/no_proxy_exclude_workers/additional_no_proxy* - Proxy variables for deploying behind a
proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
that correspond to each node.
* *kubelet_cgroup_driver* - Allows manual override of the cgroup-driver option for Kubelet.
By default autodetection is used to match container manager configuration.
`systemd` is the preferred driver for `containerd` though it can have issues with `cgroups v1` and `kata-containers` in which case you may want to change to `cgroupfs`.
* *kubelet_rotate_certificates* - Auto rotate the kubelet client certificates by requesting new certificates
from the kube-apiserver when the certificate expiration approaches.
* *kubelet_rotate_server_certificates* - Auto rotate the kubelet server certificates by requesting new certificates
from the kube-apiserver when the certificate expiration approaches.
**Note** that server certificates are **not** approved automatically. Approve them manually
(`kubectl get csr`, `kubectl certificate approve`) or implement custom approving controller like
[kubelet-rubber-stamp](https://github.com/kontena/kubelet-rubber-stamp).
* *kubelet_streaming_connection_idle_timeout* - Set the maximum time a streaming connection can be idle before the connection is automatically closed.
* *kubelet_make_iptables_util_chains* - If `true`, causes the kubelet to ensure a set of `iptables` rules is present on the host.
* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
* *kubelet_systemd_hardening* - If `true`, provides kubelet systemd service with security features for isolation.
**N.B.** To enable this feature, ensure you are using **`cgroup v2`** on your system. Check with the command: `sudo ls -l /sys/fs/cgroup/*.slice`. If the directory does not exist, enable cgroup v2 with the following guide: [enable cgroup v2](https://rootlesscontaine.rs/getting-started/common/cgroup2/#enabling-cgroup-v2).
* *kubelet_secure_addresses* - By default *kubelet_systemd_hardening* sets the **control plane** `ansible_host` IPs as the `kubelet_secure_addresses`. In case you have multiple interfaces in your control plane nodes and the `kube-apiserver` is not bound to the default interface, you can override them with this variable.
Example:
The **control plane** node may have 2 interfaces with the following IP addresses: `eth0:10.0.0.110`, `eth1:192.168.1.110`.
By default `kubelet_secure_addresses` is set to `10.0.0.110`, because the Ansible control host uses `eth0` to connect to the machine. In case you want to use `eth1` as the outgoing interface on which `kube-apiserver` connects to the `kubelet`s, you should override the variable in this way: `kubelet_secure_addresses: "192.168.1.110"`.
* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
For example, labels can be set in the inventory as variables or more widely in group_vars.
*node_labels* can only be defined as a dict:
@ -31,13 +31,12 @@ You need to source the vSphere credentials you use to deploy your machines that
| vsphere_csi_controller_replicas | TRUE | integer | | 1 | Number of pods Kubernetes should deploy for the CSI controller |
| vsphere_csi_liveness_probe_image_tag | TRUE | string | | "v2.2.0" | CSI liveness probe image tag to use |
| vsphere_csi_provisioner_image_tag | TRUE | string | | "v2.1.0" | CSI provisioner image tag to use |
| vsphere_csi_node_driver_registrar_image_tag | TRUE | string | | "v1.1.0" | CSI node driver registrar image tag to use |
| vsphere_csi_driver_image_tag | TRUE | string | | "v1.0.2" | CSI driver image tag to use |
| vsphere_csi_resizer_tag | TRUE | string | | "v1.1.0" | CSI resizer image tag to use |
| vsphere_csi_aggressive_node_drain | FALSE | boolean | | false | Enable aggressive node drain strategy |
| vsphere_csi_aggressive_node_unreachable_timeout | FALSE | int | 300 | | Timeout until the node will be drained when it is in an unreachable state |
| vsphere_csi_aggressive_node_not_ready_timeout | FALSE | int | 300 | | Timeout until the node will be drained when it is in a not-ready state |
| vsphere_csi_namespace | TRUE | string | | "kube-system" | vSphere CSI namespace to use; kube-system for backward compatibility, should be changed to vmware-system-csi in the long run |
## Usage example
@ -21,14 +21,14 @@ After this step you should have:
### Kubespray configuration
First, in `inventory/sample/group_vars/all/all.yml` you must set `cloud_provider` to `external` and `external_cloud_provider` to `vsphere`.
```yml
cloud_provider: "external"
external_cloud_provider: "vsphere"
```
Then, in `inventory/sample/group_vars/all/vsphere.yml`, you need to declare your vCenter credentials and enable the vSphere CSI following the description below.
| Variable | Required | Type | Choices | Default | Comment |
|----------|----------|------|---------|---------|---------|
@ -1,140 +0,0 @@
---
## Directory where the binaries will be installed
bin_dir: /usr/local/bin
## The access_ip variable is used to define how other nodes should access
## the node. This is used in flannel to allow other flannel nodes to see
## this node for example. The access_ip is really useful AWS and Google
## environments where the nodes are accessed remotely by the "public" ip,
## but don't know about that address themselves.
# access_ip: 1.1.1.1
## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
# loadbalancer_apiserver:
# address: 1.2.3.4
# port: 1234
## Internal loadbalancers for apiservers
# loadbalancer_apiserver_localhost: true
# valid options are "nginx" or "haproxy"
# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy"
## If the cilium is going to be used in strict mode, we can use the
## localhost connection and not use the external LB. If this parameter is
## not specified, the first node to connect to kubeapi will be used.
# use_localhost_as_kubeapi_loadbalancer: true
## Local loadbalancer should use this port
## and must be set to port 6443
loadbalancer_apiserver_port: 6443
## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx.
loadbalancer_apiserver_healthcheck_port: 8081
### OTHER OPTIONAL VARIABLES
## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries.
## If true, Kubespray does not include host nameservers in nameserverentries during the dns_late stage. However, it still uses the nameservers to make sure the cluster installs safely during the dns_early stage.
## Use this option with caution, you may need to define your dns servers. Otherwise, outbound queries such as www.google.com may fail.
# disable_host_nameservers: false
## Upstream dns servers
# upstream_dns_servers:
# - 8.8.8.8
# - 8.8.4.4
## There are some changes specific to the cloud providers
## for instance we need to encapsulate packets with some network plugins
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
## When openstack is used make sure to source in the openstack credentials
## like you would do when using openstack-client before starting the playbook.
# cloud_provider:
## When cloud_provider is set to 'external', you can set the cloud controller to deploy
## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud'
## When openstack or vsphere are used make sure to source in the required fields
# external_cloud_provider:
## Set these proxy values in order to update package manager and docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""
## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
# no_proxy: ""
## Some problems may occur when downloading files over https proxy due to ansible bug
## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
## SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
# download_validate_certs: False
## If you need exclude all cluster nodes from proxy and other resources, add other resources here.
# additional_no_proxy: ""
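## For illustration only (placeholder endpoints, not defaults), a cluster behind a
## corporate proxy might combine these settings as follows:
# http_proxy: "http://proxy.example.com:3128"
# https_proxy: "http://proxy.example.com:3128"
# additional_no_proxy: ".internal.example.com"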
## If you need to disable proxying of os package repositories but are still behind an http_proxy set
## skip_http_proxy_on_os_packages to true
## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu
## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt packages will be installed from your desired source
# skip_http_proxy_on_os_packages: false
## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the
## no_proxy variable, set below to true:
no_proxy_exclude_workers: false
## Certificate Management
## This setting determines whether certs are generated via scripts.
## Choose 'none' if you provide your own certificates.
## Options are "script" or "none"
# cert_management: script
## Set to true to allow pre-checks to fail and continue deployment
# ignore_assert_errors: false
## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
# kube_read_only_port: 10255
## Set true to download and cache container images
# download_container: true
## Deploy container engine
# Set false if you want to deploy container engine manually.
# deploy_container_engine: true
## Red Hat Enterprise Linux subscription registration
## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination
## Update RHEL subscription purpose usage, role and SLA if necessary
# rh_subscription_username: ""
# rh_subscription_password: ""
# rh_subscription_org_id: ""
# rh_subscription_activation_key: ""
# rh_subscription_usage: "Development"
# rh_subscription_role: "Red Hat Enterprise Server"
# rh_subscription_sla: "Self-Support"
## Check if access_ip responds to ping. Set false if your firewall blocks ICMP.
# ping_access_ip: true
# sysctl_file_path to add sysctl conf to
# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
kube_webhook_token_auth: false
kube_webhook_token_auth_url_skip_tls_verify: false
# kube_webhook_token_auth_url: https://...
## base64-encoded string of the webhook's CA certificate
# kube_webhook_token_auth_ca_data: "LS0t..."
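## A filled-in sketch for illustration (placeholder endpoint, not a default):
# kube_webhook_token_auth: true
# kube_webhook_token_auth_url: https://auth.example.com/authenticate
# kube_webhook_token_auth_url_skip_tls_verify: false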
## NTP Settings
# Start the ntpd or chrony service and enable it at system boot.
ntp_enabled: false
ntp_manage_config: false
ntp_servers:
- "0.pool.ntp.org iburst"
- "1.pool.ntp.org iburst"
- "2.pool.ntp.org iburst"
- "3.pool.ntp.org iburst"
## Used to control no_log attribute
unsafe_show_logs: false

View file

@ -1,9 +0,0 @@
## To use AWS EBS CSI Driver to provision volumes, uncomment the first value
## and configure the parameters below
# aws_ebs_csi_enabled: true
# aws_ebs_csi_enable_volume_scheduling: true
# aws_ebs_csi_enable_volume_snapshot: false
# aws_ebs_csi_enable_volume_resizing: false
# aws_ebs_csi_controller_replicas: 1
# aws_ebs_csi_plugin_image_tag: latest
# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment"

View file

@ -1,40 +0,0 @@
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values
# azure_cloud:
# azure_tenant_id:
# azure_subscription_id:
# azure_aad_client_id:
# azure_aad_client_secret:
# azure_resource_group:
# azure_location:
# azure_subnet_name:
# azure_security_group_name:
# azure_security_group_resource_group:
# azure_vnet_name:
# azure_vnet_resource_group:
# azure_route_table_name:
# azure_route_table_resource_group:
# supported values are 'standard' or 'vmss'
# azure_vmtype: standard
## Azure Disk CSI credentials and parameters
## see docs/azure-csi.md for details on how to get these values
# azure_csi_tenant_id:
# azure_csi_subscription_id:
# azure_csi_aad_client_id:
# azure_csi_aad_client_secret:
# azure_csi_location:
# azure_csi_resource_group:
# azure_csi_vnet_name:
# azure_csi_vnet_resource_group:
# azure_csi_subnet_name:
# azure_csi_security_group_name:
# azure_csi_use_instance_metadata:
# azure_csi_tags: "Owner=owner,Team=team,Environment=environment"
## To enable Azure Disk CSI, uncomment below
# azure_csi_enabled: true
# azure_csi_controller_replicas: 1
# azure_csi_plugin_image_tag: latest

View file

@ -1,50 +0,0 @@
---
# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options
# containerd_storage_dir: "/var/lib/containerd"
# containerd_state_dir: "/run/containerd"
# containerd_oom_score: 0
# containerd_default_runtime: "runc"
# containerd_snapshotter: "native"
# containerd_runc_runtime:
# name: runc
# type: "io.containerd.runc.v2"
# engine: ""
# root: ""
# containerd_additional_runtimes:
# Example for Kata Containers as additional runtime:
# - name: kata
# type: "io.containerd.kata.v2"
# engine: ""
# root: ""
# containerd_grpc_max_recv_message_size: 16777216
# containerd_grpc_max_send_message_size: 16777216
# containerd_debug_level: "info"
# containerd_metrics_address: ""
# containerd_metrics_grpc_histogram: false
## An obvious use case is allowing insecure-registry access to self hosted registries.
## Can be ipaddress and domain_name.
## for example, define mirror.registry.io or 172.19.16.11:5000
## set "name": "url". An insecure url must start with http://
## Port number is also needed if the default HTTPS port is not used.
# containerd_insecure_registries:
# "localhost": "http://127.0.0.1"
# "172.19.16.11:5000": "http://172.19.16.11:5000"
# containerd_registries:
# "docker.io": "https://registry-1.docker.io"
# containerd_max_container_log_line_size: -1
# containerd_registry_auth:
# - registry: 10.0.0.2:5000
# username: user
# password: pass

View file

@ -1,2 +0,0 @@
## Whether coreos should auto upgrade, default is true
# coreos_auto_upgrade: true

View file

@ -1,6 +0,0 @@
# crio_insecure_registries:
# - 10.0.0.2:5000
# crio_registry_auth:
# - registry: 10.0.0.2:5000
# username: user
# password: pass

View file

@ -1,59 +0,0 @@
---
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
## Please note that overlay2 is only supported on newer kernels
# docker_storage_options: -s overlay2
## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7.
docker_container_storage_setup: false
## A disk path must be defined for docker_container_storage_setup_devs,
## otherwise docker-storage-setup will be executed incorrectly.
# docker_container_storage_setup_devs: /dev/vdb
## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver)
## Valid options are systemd or cgroupfs, default is systemd
# docker_cgroup_driver: systemd
## Only set this if you have more than 3 nameservers:
## If true Kubespray will only use the first 3, otherwise it will fail
docker_dns_servers_strict: false
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
## Used to set docker daemon iptables options to true
docker_iptables_enabled: "false"
# Docker log options
# Rotate container stderr/stdout logs at 50m and keep last 5
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
# define docker bin_dir
docker_bin_dir: "/usr/bin"
# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1'
# kubespray deletes the docker package on each run, so caching the package makes sense
docker_rpm_keepcache: 1
## An obvious use case is allowing insecure-registry access to self hosted registries.
## Can be ipaddress and domain_name.
## for example, define 172.19.16.11 or mirror.registry.io
# docker_insecure_registries:
# - mirror.registry.io
# - 172.19.16.11
## Add other registries, for example a China registry mirror.
# docker_registry_mirrors:
# - https://registry.docker-cn.com
# - https://mirror.aliyuncs.com
## If non-empty will override default system MountFlags value.
## This option takes a mount propagation flag: shared, slave
## or private, which controls whether mounts in the file system
## namespace set up for docker will receive or propagate mounts
## and unmounts. Leave empty for system default
# docker_mount_flags:
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
# docker_options: ""

View file

@ -1,16 +0,0 @@
---
## Directory where etcd data stored
etcd_data_dir: /var/lib/etcd
## Container runtime
## docker for docker, crio for cri-o and containerd for containerd.
## Additionally you can set this to kubeadm if you want to install etcd using kubeadm
## Kubeadm etcd deployment is experimental and only available for new deployments
## If this is not set, container manager will be inherited from the Kubespray defaults
## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want.
## This also makes it possible to use a different container manager for etcd nodes.
# container_manager: containerd
## Settings for etcd deployment type
# Set this to docker if you are using container_manager: docker
etcd_deployment_type: host

View file

@ -1,10 +0,0 @@
## GCP compute Persistent Disk CSI Driver credentials and parameters
## See docs/gcp-pd-csi.md for information about the implementation
## Specify the path to the file containing the service account credentials
# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json"
## To enable GCP Persistent Disk CSI driver, uncomment below
# gcp_pd_csi_enabled: true
# gcp_pd_csi_controller_replicas: 1
# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0"

View file

@ -1,14 +0,0 @@
## Values for the external Hcloud Cloud Controller
# external_hcloud_cloud:
# hcloud_api_token: ""
# token_secret_name: hcloud
# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support
# service_account_name: cloud-controller-manager
#
# controller_image_tag: "latest"
# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset
# ## Format:
# ## external_hcloud_cloud.controller_extra_args:
# ## arg1: "value1"
# ## arg2: "value2"
# controller_extra_args: {}

View file

@ -1,28 +0,0 @@
## When Oracle Cloud Infrastructure is used, set these variables
# oci_private_key:
# oci_region_id:
# oci_tenancy_id:
# oci_user_id:
# oci_user_fingerprint:
# oci_compartment_id:
# oci_vnc_id:
# oci_subnet1_id:
# oci_subnet2_id:
## Override these default/optional behaviors if you wish
# oci_security_list_management: All
## Set this if you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples.
# oci_security_lists:
# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
# oci_use_instance_principals: false
# oci_cloud_controller_version: 0.6.0
## If you would like to control OCI query rate limits for the controller
# oci_rate_limit:
# rate_limit_qps_read:
# rate_limit_qps_write:
# rate_limit_bucket_read:
# rate_limit_bucket_write:
## Other optional variables
# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci)
# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above)

View file

@ -1,103 +0,0 @@
---
## Global Offline settings
### Private Container Image Registry
# registry_host: "myprivateregistry.com"
# files_repo: "http://myprivatehttpd"
### If using CentOS, RedHat, AlmaLinux or Fedora
# yum_repo: "http://myinternalyumrepo"
### If using Debian
# debian_repo: "http://myinternaldebianrepo"
### If using Ubuntu
# ubuntu_repo: "http://myinternalubunturepo"
## Container Registry overrides
# kube_image_repo: "{{ registry_host }}"
# gcr_image_repo: "{{ registry_host }}"
# github_image_repo: "{{ registry_host }}"
# docker_image_repo: "{{ registry_host }}"
# quay_image_repo: "{{ registry_host }}"
## Kubernetes components
# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm"
# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"
## CNI Plugins
# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
## cri-tools
# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
## [Optional] etcd: only if you **DON'T** use etcd_deployment=host
# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
# [Optional] Calico: If using Calico network plugin
# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore
# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz"
# [Optional] Cilium: If using Cilium network plugin
# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz"
# [Optional] Flannel: If using Flannel network plugin
# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}"
# [Optional] helm: only if you set helm_enabled: true
# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"
# [Optional] crun: only if you set crun_enabled: true
# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"
# [Optional] kata: only if you set kata_containers_enabled: true
# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz"
# [Optional] cri-dockerd: only if you set container_manager: docker
# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz"
# [Optional] cri-o: only if you set container_manager: crio
# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"
# [Optional] runc,containerd: only if you set container_runtime: containerd
# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}"
# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true
# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc"
# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1"
## CentOS/Redhat/AlmaLinux
### For EL7, base and extras repo must be available, for EL8, baseos and appstream
### By default we enable those repo automatically
# rhel_enable_repos: false
### Docker / Containerd
# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
## Fedora
### Docker
# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}"
# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
### Containerd
# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd"
# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
## Debian
### Docker
# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
### Containerd
# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd"
# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg"
# containerd_debian_repo_repokey: 'YOURREPOKEY'
## Ubuntu
### Docker
# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce"
# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg"
### Containerd
# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd"
# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
# containerd_ubuntu_repo_repokey: 'YOURREPOKEY'

View file

@ -1,49 +0,0 @@
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
# openstack_blockstorage_version: "v1/v2/auto (default)"
# openstack_blockstorage_ignore_volume_az: yes
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
# openstack_lbaas_enabled: True
# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
## To enable automatic floating ip provisioning, specify a subnet.
# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
## Override default LBaaS behavior
# openstack_lbaas_use_octavia: False
# openstack_lbaas_method: "ROUND_ROBIN"
# openstack_lbaas_provider: "haproxy"
# openstack_lbaas_create_monitor: "yes"
# openstack_lbaas_monitor_delay: "1m"
# openstack_lbaas_monitor_timeout: "30s"
# openstack_lbaas_monitor_max_retries: "3"
## Values for the external OpenStack Cloud Controller
# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP"
# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP"
# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from"
# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from"
# external_openstack_lbaas_method: "ROUND_ROBIN"
# external_openstack_lbaas_provider: "octavia"
# external_openstack_lbaas_create_monitor: false
# external_openstack_lbaas_monitor_delay: "1m"
# external_openstack_lbaas_monitor_timeout: "30s"
# external_openstack_lbaas_monitor_max_retries: "3"
# external_openstack_lbaas_manage_security_groups: false
# external_openstack_lbaas_internal_lb: false
# external_openstack_network_ipv6_disabled: false
# external_openstack_network_internal_networks: []
# external_openstack_network_public_networks: []
# external_openstack_metadata_search_order: "configDrive,metadataService"
## Application credentials to authenticate against Keystone API
## Those settings will take precedence over username and password that might be set in your environment
## All of them are required
# external_openstack_application_credential_name:
# external_openstack_application_credential_id:
# external_openstack_application_credential_secret:
## The tag of the external OpenStack Cloud Controller image
# external_openstack_cloud_controller_image_tag: "latest"
## To use Cinder CSI plugin to provision volumes set this value to true
## Make sure to source in the openstack credentials
# cinder_csi_enabled: true
# cinder_csi_controller_replicas: 1

View file

@ -1,24 +0,0 @@
## Repo for UpClouds csi-driver: https://github.com/UpCloudLtd/upcloud-csi
## To use UpClouds CSI plugin to provision volumes set this value to true
## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD
# upcloud_csi_enabled: true
# upcloud_csi_controller_replicas: 1
## Override used image tags
# upcloud_csi_provisioner_image_tag: "v3.1.0"
# upcloud_csi_attacher_image_tag: "v3.4.0"
# upcloud_csi_resizer_image_tag: "v1.4.0"
# upcloud_csi_plugin_image_tag: "v0.3.3"
# upcloud_csi_node_image_tag: "v2.5.0"
# upcloud_tolerations: []
## Storage class options
# storage_classes:
# - name: standard
# is_default: true
# expand_persistent_volumes: true
# parameters:
# tier: maxiops
# - name: hdd
# is_default: false
# expand_persistent_volumes: true
# parameters:
# tier: hdd

View file

@ -1,32 +0,0 @@
## Values for the external vSphere Cloud Provider
# external_vsphere_vcenter_ip: "myvcenter.domain.com"
# external_vsphere_vcenter_port: "443"
# external_vsphere_insecure: "true"
# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable
# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable
# external_vsphere_datacenter: "DATACENTER_name"
# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"
## vSphere version where the VMs are located
# external_vsphere_version: "6.7u3"
## Tags for the external vSphere Cloud Provider images
## gcr.io/cloud-provider-vsphere/cpi/release/manager
# external_vsphere_cloud_controller_image_tag: "latest"
## gcr.io/cloud-provider-vsphere/csi/release/syncer
# vsphere_syncer_image_tag: "v2.5.1"
## registry.k8s.io/sig-storage/csi-attacher
# vsphere_csi_attacher_image_tag: "v3.4.0"
## gcr.io/cloud-provider-vsphere/csi/release/driver
# vsphere_csi_controller: "v2.5.1"
## registry.k8s.io/sig-storage/livenessprobe
# vsphere_csi_liveness_probe_image_tag: "v2.6.0"
## registry.k8s.io/sig-storage/csi-provisioner
# vsphere_csi_provisioner_image_tag: "v3.1.0"
## registry.k8s.io/sig-storage/csi-resizer
## makes sense only for vSphere version >=7.0
# vsphere_csi_resizer_tag: "v1.3.0"
## To use vSphere CSI plugin to provision volumes set this value to true
# vsphere_csi_enabled: true
# vsphere_csi_controller_replicas: 1

View file

@ -1,26 +0,0 @@
---
## Etcd auto compaction retention for mvcc key value store in hour
# etcd_compaction_retention: 0
## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
# etcd_metrics: basic
## Etcd is restricted by default to 512M on systems under 4GB RAM; 512MB is not enough for much more than testing.
## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
## This value is only relevant when deploying etcd with `etcd_deployment_type: docker`
# etcd_memory_limit: "512M"
## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
## etcd documentation for more information.
# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it.
# etcd_quota_backend_bytes: "2147483648"
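## For illustration, one consistent sizing sketch (an assumption for a dedicated etcd node,
## not a default) is to lift the memory restriction and raise the quota towards the 8G maximum:
# etcd_memory_limit: "0"
# etcd_quota_backend_bytes: "8589934592"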
# Maximum client request size in bytes the server will accept.
# etcd is designed to handle small key value pairs typical for metadata.
# Larger requests will work, but may increase the latency of other requests
# etcd_max_request_bytes: "1572864"
### ETCD: disable peer client cert authentication.
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
# etcd_peer_client_auth: true

View file

@ -1,228 +0,0 @@
---
# Kubernetes dashboard
# RBAC required. see docs/getting-started.md for access details.
dashboard_enabled: true
# Helm deployment
helm_enabled: false
# Registry deployment
registry_enabled: false
# registry_namespace: kube-system
# registry_storage_class: ""
# registry_disk_size: "10Gi"
# Metrics Server deployment
metrics_server_enabled: false
# metrics_server_container_port: 4443
# metrics_server_kubelet_insecure_tls: true
# metrics_server_metric_resolution: 15s
# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname"
# metrics_server_host_network: false
# metrics_server_replicas: 1
# Rancher Local Path Provisioner
local_path_provisioner_enabled: false
# local_path_provisioner_namespace: "local-path-storage"
# local_path_provisioner_storage_class: "local-path"
# local_path_provisioner_reclaim_policy: Delete
# local_path_provisioner_claim_root: /opt/local-path-provisioner/
# local_path_provisioner_debug: false
# local_path_provisioner_image_repo: "rancher/local-path-provisioner"
# local_path_provisioner_image_tag: "v0.0.22"
# local_path_provisioner_helper_image_repo: "busybox"
# local_path_provisioner_helper_image_tag: "latest"
# Local volume provisioner deployment
local_volume_provisioner_enabled: false
# local_volume_provisioner_namespace: kube-system
# local_volume_provisioner_nodelabels:
# - kubernetes.io/hostname
# - topology.kubernetes.io/region
# - topology.kubernetes.io/zone
# local_volume_provisioner_storage_classes:
# local-storage:
# host_dir: /mnt/disks
# mount_dir: /mnt/disks
# volume_mode: Filesystem
# fs_type: ext4
# fast-disks:
# host_dir: /mnt/fast-disks
# mount_dir: /mnt/fast-disks
# block_cleaner_command:
# - "/scripts/shred.sh"
# - "2"
# volume_mode: Filesystem
# fs_type: ext4
# local_volume_provisioner_tolerations:
# - effect: NoSchedule
# operator: Exists
# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots
# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller
# Longhorn is an external CSI that would also require setting this to true but it is not included in kubespray
# csi_snapshot_controller_enabled: false
# csi snapshot namespace
# snapshot_controller_namespace: kube-system
# CephFS provisioner deployment
cephfs_provisioner_enabled: false
# cephfs_provisioner_namespace: "cephfs-provisioner"
# cephfs_provisioner_cluster: ceph
# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# cephfs_provisioner_admin_id: admin
# cephfs_provisioner_secret: secret
# cephfs_provisioner_storage_class: cephfs
# cephfs_provisioner_reclaim_policy: Delete
# cephfs_provisioner_claim_root: /volumes
# cephfs_provisioner_deterministic_names: true
# RBD provisioner deployment
rbd_provisioner_enabled: false
# rbd_provisioner_namespace: rbd-provisioner
# rbd_provisioner_replicas: 2
# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# rbd_provisioner_pool: kube
# rbd_provisioner_admin_id: admin
# rbd_provisioner_secret_name: ceph-secret-admin
# rbd_provisioner_secret: ceph-key-admin
# rbd_provisioner_user_id: kube
# rbd_provisioner_user_secret_name: ceph-secret-user
# rbd_provisioner_user_secret: ceph-key-user
# rbd_provisioner_user_secret_namespace: rbd-provisioner
# rbd_provisioner_fs_type: ext4
# rbd_provisioner_image_format: "2"
# rbd_provisioner_image_features: layering
# rbd_provisioner_storage_class: rbd
# rbd_provisioner_reclaim_policy: Delete
# Nginx ingress controller deployment
ingress_nginx_enabled: true
# ingress_nginx_host_network: false
ingress_publish_status_address: ""
# ingress_nginx_nodeselector:
# kubernetes.io/os: "linux"
# ingress_nginx_tolerations:
# - key: "node-role.kubernetes.io/master"
# operator: "Equal"
# value: ""
# effect: "NoSchedule"
# - key: "node-role.kubernetes.io/control-plane"
# operator: "Equal"
# value: ""
# effect: "NoSchedule"
# ingress_nginx_namespace: "ingress-nginx"
# ingress_nginx_insecure_port: 80
# ingress_nginx_secure_port: 443
# ingress_nginx_configmap:
# map-hash-bucket-size: "128"
# ssl-protocols: "TLSv1.2 TLSv1.3"
# ingress_nginx_configmap_tcp_services:
# 9000: "default/example-go:8080"
# ingress_nginx_configmap_udp_services:
# 53: "kube-system/coredns:53"
# ingress_nginx_extra_args:
# - --default-ssl-certificate=default/foo-tls
# ingress_nginx_termination_grace_period_seconds: 300
# ingress_nginx_class: nginx
# ALB ingress controller deployment
ingress_alb_enabled: false
# alb_ingress_aws_region: "us-east-1"
# alb_ingress_restrict_scheme: "false"
# Enables logging on all outbound requests sent to the AWS API.
# If logging is desired, set to true.
# alb_ingress_aws_debug: "false"
# Cert manager deployment
cert_manager_enabled: true
# cert_manager_namespace: "cert-manager"
# cert_manager_tolerations:
# - key: node-role.kubernetes.io/master
# effect: NoSchedule
# - key: node-role.kubernetes.io/control-plane
# effect: NoSchedule
# cert_manager_affinity:
# nodeAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 100
# preference:
# matchExpressions:
# - key: node-role.kubernetes.io/control-plane
# operator: In
# values:
# - ""
# cert_manager_nodeselector:
# kubernetes.io/os: "linux"
# cert_manager_trusted_internal_ca: |
# -----BEGIN CERTIFICATE-----
# [REPLACE with your CA certificate]
# -----END CERTIFICATE-----
# cert_manager_leader_election_namespace: kube-system
# MetalLB deployment
metallb_enabled: true
metallb_speaker_enabled: "{{ metallb_enabled }}"
metallb_ip_range:
- "192.168.30.201-192.168.30.239"
metallb_pool_name: "private-lan"
# metallb_auto_assign: true
# metallb_avoid_buggy_ips: false
# metallb_speaker_nodeselector:
# kubernetes.io/os: "linux"
# metallb_controller_nodeselector:
# kubernetes.io/os: "linux"
# metallb_speaker_tolerations:
# - key: "node-role.kubernetes.io/master"
# operator: "Equal"
# value: ""
# effect: "NoSchedule"
# - key: "node-role.kubernetes.io/control-plane"
# operator: "Equal"
# value: ""
# effect: "NoSchedule"
# metallb_controller_tolerations:
# - key: "node-role.kubernetes.io/master"
# operator: "Equal"
# value: ""
# effect: "NoSchedule"
# - key: "node-role.kubernetes.io/control-plane"
# operator: "Equal"
# value: ""
# effect: "NoSchedule"
# metallb_version: v0.12.1
metallb_protocol: "layer2"
# metallb_port: "7472"
# metallb_memberlist_port: "7946"
metallb_additional_address_pools:
public-lan:
ip_range:
- "192.168.1.30-192.168.1.34"
protocol: "layer2"
auto_assign: false
# avoid_buggy_ips: false
# metallb_protocol: "bgp"
# metallb_peers:
# - peer_address: 192.0.2.1
# peer_asn: 64512
# my_asn: 4200000000
# - peer_address: 192.0.2.2
# peer_asn: 64513
# my_asn: 4200000000
argocd_enabled: true
argocd_version: v2.5.5
# argocd_namespace: argocd
# Default password:
# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli
# ---
# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command:
# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2
# ---
# Use the following var to set admin password
# argocd_admin_password: "password"
# The plugin manager for kubectl
krew_enabled: false
krew_root_dir: "/usr/local/krew"

View file

@ -1,350 +0,0 @@
---
# Kubernetes configuration dirs and system namespace.
# These are where all the additional config stuff goes
# that kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.25.5
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# This is the user that owns the cluster installation.
kube_owner: kube
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2
# Directory where credentials will be stored
credentials_dir: "{{ inventory_dir }}/credentials"
## It is possible to activate / deactivate selected authentication methods (oidc, static token auth)
# kube_oidc_auth: false
# kube_token_auth: false
## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...)
# kube_oidc_url: https:// ...
# kube_oidc_client_id: kubernetes
## Optional settings for OIDC
# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem"
# kube_oidc_username_claim: sub
# kube_oidc_username_prefix: 'oidc:'
# kube_oidc_groups_claim: groups
# kube_oidc_groups_prefix: 'oidc:'
## Variables to control webhook authn/authz
# kube_webhook_token_auth: false
# kube_webhook_token_auth_url: https://...
# kube_webhook_token_auth_url_skip_tls_verify: false
## For webhook authorization, authorization_modes must include Webhook
# kube_webhook_authorization: false
# kube_webhook_authorization_url: https://...
# kube_webhook_authorization_url_skip_tls_verify: false
# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: calico
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
kube_network_plugin_multus: false
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18
# internal network node size allocation (optional). This is the size allocated
# to each node for pod IP address allocation. Note that the number of pods per node is
# also limited by the kubelet_max_pods variable which defaults to 110.
#
# Example:
# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node:
# - kube_pods_subnet: 10.233.64.0/18
# - kube_network_node_prefix: 24
# - kubelet_max_pods: 110
#
# Example:
# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node:
# - kube_pods_subnet: 10.233.64.0/18
# - kube_network_node_prefix: 25
# - kubelet_max_pods: 110
kube_network_node_prefix: 24
# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
enable_dual_stack_networks: false
# Kubernetes internal network for IPv6 services, unused block of space.
# This is only used if enable_dual_stack_networks is set to true
# This provides 4096 IPv6 IPs
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
# This network must not already be in your network infrastructure!
# This is only used if enable_dual_stack_networks is set to true.
# This provides room for 256 nodes with 254 pods per node.
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
# IPv6 subnet size allocated to each node for pods.
# This is only used if enable_dual_stack_networks is set to true
# This provides room for 254 pods per node.
kube_network_node_prefix_ipv6: 120
# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 6443 # (https)
# Kube-proxy proxyMode configuration.
# Can be ipvs, iptables
kube_proxy_mode: ipvs
# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
# must be set to true for MetalLB, kube-vip(ARP enabled) to work
kube_proxy_strict_arp: true
# A string slice of values which specify the addresses to use for NodePorts.
# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
# The default empty string slice ([]) means to use all local addresses.
# kube_proxy_nodeport_addresses_cidr is retained for legacy config
kube_proxy_nodeport_addresses: >-
{%- if kube_proxy_nodeport_addresses_cidr is defined -%}
[{{ kube_proxy_nodeport_addresses_cidr }}]
{%- else -%}
[]
{%- endif -%}
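# For illustration (hypothetical value): setting the legacy variable below would render
# kube_proxy_nodeport_addresses as [192.168.1.0/24], restricting NodePorts to that subnet.
# kube_proxy_nodeport_addresses_cidr: 192.168.1.0/24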
# If non-empty, will use this string as identification instead of the actual hostname
# kube_override_hostname: >-
# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
# {%- else -%}
# {{ inventory_hostname }}
# {%- endif -%}
## Encrypting Secret Data at Rest
kube_encrypt_secret_data: false
# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/
# kubelet_shutdown_grace_period has to be greater than kubelet_shutdown_grace_period_critical_pods to allow
# non-critical pods to also terminate gracefully
# kubelet_shutdown_grace_period: 60s
# kubelet_shutdown_grace_period_critical_pods: 20s
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: distrilab.org
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# dns_timeout: 2
# dns_attempts: 2
# Custom search domains to be added in addition to the default cluster search domains
# searchdomains:
# - svc.{{ cluster_name }}
# - default.svc.{{ cluster_name }}
# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
# remove_default_searchdomains: false
# Can be coredns, coredns_dual, manual or none
dns_mode: coredns
# Set manual server if using a custom cluster DNS server
# manual_dns_server: 10.x.x.x
# Enable nodelocal dns cache
enable_nodelocaldns: true
enable_nodelocaldns_secondary: false
nodelocaldns_ip: 169.254.25.10
nodelocaldns_health_port: 9254
nodelocaldns_second_health_port: 9256
nodelocaldns_bind_metrics_host_ip: false
nodelocaldns_secondary_skew_seconds: 5
# nodelocaldns_external_zones:
# - zones:
# - example.com
# - example.io:1053
# nameservers:
# - 1.1.1.1
# - 2.2.2.2
# cache: 5
# - zones:
# - https://mycompany.local:4453
# nameservers:
# - 192.168.0.53
# cache: 0
# - zones:
# - mydomain.tld
# nameservers:
# - 10.233.0.3
# cache: 5
# rewrite:
# - name website.tld website.namespace.svc.cluster.local
# Enable k8s_external plugin for CoreDNS
enable_coredns_k8s_external: false
coredns_k8s_external_zone: k8s_external.local
# Enable endpoint_pod_names option for kubernetes plugin
enable_coredns_k8s_endpoint_pod_names: false
# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config
# dns_upstream_forward_extra_opts:
# policy: sequential
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: host_resolvconf
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
## Container runtime
## docker for docker, crio for cri-o and containerd for containerd.
## Default: containerd
container_manager: containerd
# Additional container runtimes
kata_containers_enabled: false
kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
# audit log for kubernetes
kubernetes_audit: false
# define kubelet config dir for dynamic kubelet
# kubelet_config_dir:
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
podsecuritypolicy_enabled: false
# Custom PodSecurityPolicySpec for restricted policy
# podsecuritypolicy_restricted_spec: {}
# Custom PodSecurityPolicySpec for privileged policy
# podsecuritypolicy_privileged_spec: {}
# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
kubeconfig_localhost: true
# Use ansible_host as external api ip when copying over kubeconfig.
# kubeconfig_localhost_ansible_host: false
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
# kubectl_localhost: false
# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# kubelet_enforce_node_allocatable: pods
## Optionally reserve resources for OS system daemons.
# system_reserved: true
## Uncomment to override default values
# system_memory_reserved: 512Mi
# system_cpu_reserved: 500m
# system_ephemeral_storage_reserved: 2Gi
## Reservation for master hosts
# system_master_memory_reserved: 256Mi
# system_master_cpu_reserved: 250m
# system_master_ephemeral_storage_reserved: 2Gi
## Eviction Thresholds to avoid system OOMs
# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds
# eviction_hard: {}
# eviction_hard_control_plane: {}
# An alternative flexvolume plugin directory
# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
## Supplementary addresses that can be added in kubernetes ssl keys.
## That can be useful for example to setup a keepalived virtual IP
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-sigs/kubespray/issues/2141
## Set this variable to true to get rid of this issue
volume_cross_zone_attachment: false
## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI,
## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI)
persistent_volumes_enabled: false
## Container Engine Acceleration
## Enable container acceleration feature, for example use gpu acceleration in containers
# nvidia_accelerator_enabled: true
## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset.
## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
## Array with nvidia_gpu_nodes, leave empty or comment if you don't want to install drivers.
## Labels and taints won't be set to nodes if they are not in the array.
# nvidia_gpu_nodes:
# - kube-gpu-001
# nvidia_driver_version: "384.111"
## flavor can be tesla or gtx
# nvidia_gpu_flavor: gtx
## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io.
# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2
# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63
## NVIDIA GPU device plugin image.
# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
# tls_min_version: ""
## Support tls cipher suites.
# tls_cipher_suites: {}
# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
# - TLS_ECDHE_RSA_WITH_RC4_128_SHA
# - TLS_RSA_WITH_3DES_EDE_CBC_SHA
# - TLS_RSA_WITH_AES_128_CBC_SHA
# - TLS_RSA_WITH_AES_128_CBC_SHA256
# - TLS_RSA_WITH_AES_128_GCM_SHA256
# - TLS_RSA_WITH_AES_256_CBC_SHA
# - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_RSA_WITH_RC4_128_SHA
## Amount of time to retain events. (default 1h0m0s)
event_ttl_duration: "1h0m0s"
## Automatically renew K8S control plane certificates on first Monday of each month
auto_renew_certificates: false
# First Monday of each month
# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00"
# kubeadm patches path
kubeadm_patches:
enabled: false
source_dir: "{{ inventory_dir }}/patches"
dest_dir: "{{ kube_config_dir }}/patches"

View file

@ -1,131 +0,0 @@
---
# see roles/network_plugin/calico/defaults/main.yml
# the default value of name
calico_cni_name: k8s-pod-network
## With calico it is possible to distribute routes with border routers of the datacenter.
## Warning: enabling router peering will disable calico's default behavior ('node mesh').
## The subnets of each node will be distributed by the datacenter router
# peer_with_router: false
# Enables Internet connectivity from containers
# nat_outgoing: true
# Enables Calico CNI "host-local" IPAM plugin
# calico_ipam_host_local: true
# add default ippool name
# calico_pool_name: "default-pool"
# add default ippool blockSize (defaults to kube_network_node_prefix)
calico_pool_blocksize: 26
# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
# calico_pool_cidr: 1.2.3.4/5
# add default ippool CIDR to CNI config
# calico_cni_pool: true
# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set.
# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
# Add default IPV6 IPPool CIDR to CNI config
# calico_cni_pool_ipv6: true
# Global as_num (/calico/bgp/v1/global/as_num)
# global_as_num: "64512"
# If doing peering with node-assigned asn where the global ASN does not match your nodes, you want this
# to be true. In all other cases, false.
# calico_no_global_as_num: false
# You can set MTU value here. If left undefined or empty, it will
# not be specified in calico CNI config, so Calico will use built-in
# defaults. The value should be a number, not a string.
# calico_mtu: 1500
# Configure the MTU to use for workload interfaces and tunnels.
# - If Wireguard is enabled, subtract 60 from your network MTU (i.e 1500-60=1440)
# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450)
# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480)
# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500)
# calico_veth_mtu: 1440
# Advertise Cluster IPs
# calico_advertise_cluster_ips: true
# Advertise Service External IPs
# calico_advertise_service_external_ips:
# - x.x.x.x/24
# - y.y.y.y/32
# Advertise Service LoadBalancer IPs
# calico_advertise_service_loadbalancer_ips:
# - x.x.x.x/24
# - y.y.y.y/16
# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
# calico_datastore: "kdd"
# Choose Calico iptables backend: "Legacy", "Auto" or "NFT"
# calico_iptables_backend: "Auto"
# Use typha (only with kdd)
# typha_enabled: false
# Generate TLS certs for secure typha<->calico-node communication
# typha_secure: false
# Scaling typha: 1 replica per 100 nodes is adequate
# Number of typha replicas
# typha_replicas: 1
# Set max typha connections
# typha_max_connections_lower_limit: 300
# Set calico network backend: "bird", "vxlan" or "none"
# bird enable BGP routing, required for ipip and no encapsulation modes
# calico_network_backend: vxlan
# IP in IP and VXLAN are mutually exclusive modes.
# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never"
# calico_ipip_mode: 'Never'
# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never"
# calico_vxlan_mode: 'Always'
# set VXLAN port and VNI
# calico_vxlan_vni: 4096
# calico_vxlan_port: 4789
# Enable eBPF mode
# calico_bpf_enabled: false
# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of:
# * can-reach=DESTINATION
# * interface=INTERFACE-REGEX
# see https://docs.projectcalico.org/reference/node/configuration
# calico_ip_auto_method: "interface=eth.*"
# calico_ip6_auto_method: "interface=eth.*"
# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the hosts interface for MTU auto-detection.
# see https://projectcalico.docs.tigera.io/reference/felix/configuration
# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
# Choose the iptables insert mode for Calico: "Insert" or "Append".
# calico_felix_chaininsertmode: Insert
# If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2)
# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS
# calico_use_default_route_src_ipaddr: false
# Enable calico traffic encryption with wireguard
# calico_wireguard_enabled: false
# Under certain situations liveness and readiness probes may need tuning
# calico_node_livenessprobe_timeout: 10
# calico_node_readinessprobe_timeout: 10
# Calico apiserver (only with kdd)
# calico_apiserver_enabled: false

View file

@ -1,10 +0,0 @@
# see roles/network_plugin/canal/defaults/main.yml
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is choosing using the node's
# default route.
# canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
# canal_masquerade: "true"

View file

@ -1,245 +0,0 @@
---
# cilium_version: "v1.12.1"
# Log-level
# cilium_debug: false
# cilium_mtu: ""
# cilium_enable_ipv4: true
# cilium_enable_ipv6: false
# Cilium agent health port
# cilium_agent_health_port: "9879"
# Identity allocation mode selects how identities are shared between cilium
# nodes by setting how they are stored. The options are "crd" or "kvstore".
# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
# These can be queried with:
# `kubectl get ciliumid`
# - "kvstore" stores identities in an etcd kvstore.
# - In order to support External Workloads, "crd" is required
# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta
# - KVStore operations are only required when cilium-operator is running with any of the below options:
# - --synchronize-k8s-services
# - --synchronize-k8s-nodes
# - --identity-allocation-mode=kvstore
# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations
# cilium_identity_allocation_mode: kvstore
# Etcd SSL dirs
# cilium_cert_dir: /etc/cilium/certs
# kube_etcd_cacert_file: ca.pem
# kube_etcd_cert_file: cert.pem
# kube_etcd_key_file: cert-key.pem
# Limits for apps
# cilium_memory_limit: 500M
# cilium_cpu_limit: 500m
# cilium_memory_requests: 64M
# cilium_cpu_requests: 100m
# Overlay Network Mode
# cilium_tunnel_mode: vxlan
# Optional features
# cilium_enable_prometheus: false
# Enable if you want to make use of hostPort mappings
# cilium_enable_portmap: false
# Monitor aggregation level (none/low/medium/maximum)
# cilium_monitor_aggregation: medium
# The monitor aggregation flags determine which TCP flags, upon the
# first observation, cause monitor notifications to be generated.
#
# Only effective when monitor aggregation is set to "medium" or higher.
# cilium_monitor_aggregation_flags: "all"
# Kube Proxy Replacement mode (strict/probe/partial)
# cilium_kube_proxy_replacement: probe
# If upgrading from Cilium < 1.5, you may want to override some of these options
# to prevent service disruptions. See also:
# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
# cilium_preallocate_bpf_maps: false
# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9
# cilium_tofqdns_enable_poller: false
# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9
# cilium_enable_legacy_services: false
# Unique ID of the cluster. Must be unique across all connected clusters and
# in the range of 1 to 255. Only relevant when building a mesh of clusters.
# This value is not defined by default
# cilium_cluster_id:
# Deploy cilium even if kube_network_plugin is not cilium.
# This enables to deploy cilium alongside another CNI to replace kube-proxy.
# cilium_deploy_additionally: false
# Auto direct node routes can be used to advertise pod routes in your cluster
# without any tunnelling (with `cilium_tunnel_mode` set to `disabled`).
# This works only if you have L2 connectivity between all your nodes.
# You will also have to specify the variable `cilium_native_routing_cidr` to
# make this work. Please refer to the cilium documentation for more
# information about this kind of setup.
# cilium_auto_direct_node_routes: false
# Allows to explicitly specify the IPv4 CIDR for native routing.
# When specified, Cilium assumes networking for this CIDR is preconfigured and
# hands traffic destined for that range to the Linux network stack without
# applying any SNAT.
# Generally speaking, specifying a native routing CIDR implies that Cilium can
# depend on the underlying networking stack to route packets to their
# destination. To offer a concrete example, if Cilium is configured to use
# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
# the user must configure the routes to reach pods, either manually or by
# setting the auto-direct-node-routes flag.
# cilium_native_routing_cidr: ""
# Allows explicitly specifying the IPv6 CIDR for native routing.
# cilium_native_routing_cidr_ipv6: ""
# Enable transparent network encryption.
# cilium_encryption_enabled: false
# Encryption method. Can be either ipsec or wireguard.
# Only effective when `cilium_encryption_enabled` is set to true.
# cilium_encryption_type: "ipsec"
# Enable encryption for pure node to node traffic.
# This option is only effective when `cilium_encryption_type` is set to `ipsec`.
# cilium_ipsec_node_encryption: false
# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation.
# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard,
# it will fall back on the wireguard-go user-space implementation of WireGuard.
# This option is only effective when `cilium_encryption_type` is set to `wireguard`.
# cilium_wireguard_userspace_fallback: false
# IP Masquerade Agent
# https://docs.cilium.io/en/stable/concepts/networking/masquerading/
# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded
# cilium_ip_masq_agent_enable: false
### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded
# cilium_non_masquerade_cidrs:
# - 10.0.0.0/8
# - 172.16.0.0/12
# - 192.168.0.0/16
# - 100.64.0.0/10
# - 192.0.0.0/24
# - 192.0.2.0/24
# - 192.88.99.0/24
# - 198.18.0.0/15
# - 198.51.100.0/24
# - 203.0.113.0/24
# - 240.0.0.0/4
### Indicates whether to masquerade traffic to the link local prefix.
### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list.
# cilium_masq_link_local: false
### A time interval at which the agent attempts to reload config from disk
# cilium_ip_masq_resync_interval: 60s
# Hubble
### Enable Hubble without install
# cilium_enable_hubble: false
### Enable Hubble Metrics
# cilium_enable_hubble_metrics: false
### if cilium_enable_hubble_metrics: true
# cilium_hubble_metrics: {}
# - dns
# - drop
# - tcp
# - flow
# - icmp
# - http
### Enable Hubble install
# cilium_hubble_install: false
### Enable automatic certificate generation if cilium_hubble_install: true
# cilium_hubble_tls_generate: false
# IP address management mode for v1.9+.
# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/
# cilium_ipam_mode: kubernetes
# Extra arguments for the Cilium agent
# cilium_agent_custom_args: []
# For adding and mounting extra volumes to the cilium agent
# cilium_agent_extra_volumes: []
# cilium_agent_extra_volume_mounts: []
# cilium_agent_extra_env_vars: []
# cilium_operator_replicas: 2
# The address at which the cilium operator binds its health check API
# cilium_operator_api_serve_addr: "127.0.0.1:9234"
## A dictionary of extra config variables to add to cilium-config, formatted like:
## cilium_config_extra_vars:
## var1: "value1"
## var2: "value2"
# cilium_config_extra_vars: {}
# For adding and mounting extra volumes to the cilium operator
# cilium_operator_extra_volumes: []
# cilium_operator_extra_volume_mounts: []
# Extra arguments for the Cilium Operator
# cilium_operator_custom_args: []
# Name of the cluster. Only relevant when building a mesh of clusters.
# cilium_cluster_name: default
# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime.
# Available for Cilium v1.10 and up.
# cilium_cni_exclusive: true
# Configure the log file for CNI logging with retention policy of 7 days.
# Disable CNI file logging by setting this field to empty explicitly.
# Available for Cilium v1.12 and up.
# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log"
# -- Configure cgroup related configuration
# -- Enable auto mount of cgroup2 filesystem.
# When `cilium_cgroup_auto_mount` is enabled, the cgroup2 filesystem is mounted at
# the `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod.
# If users disable `cilium_cgroup_auto_mount`, it is expected that they have mounted the
# cgroup2 filesystem at the specified `cilium_cgroup_host_root` path, and then that
# volume will be mounted inside the cilium agent pod at the same path.
# Available for Cilium v1.11 and up
# cilium_cgroup_auto_mount: true
# -- Configure cgroup root where cgroup2 filesystem is mounted on the host
# cilium_cgroup_host_root: "/run/cilium/cgroupv2"
# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
# cilium_bpf_map_dynamic_size_ratio: "0.0"
# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
# Available for Cilium v1.10 and up
# cilium_enable_ipv4_masquerade: true
# -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
# Available for Cilium v1.10 and up
# cilium_enable_ipv6_masquerade: true
# -- Enable native IP masquerade support in eBPF
# cilium_enable_bpf_masquerade: false
# -- Configure whether direct routing mode should route traffic via
# host stack (true) or directly and more efficiently out of BPF (false) if
# the kernel supports it. The latter has the implication that it will also
# bypass netfilter in the host namespace.
# cilium_enable_host_legacy_routing: true
# -- Enable use of the remote node identity.
# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
# cilium_enable_remote_node_identity: true
# -- Enable the use of well-known identities.
# cilium_enable_well_known_identities: false
# cilium_enable_bpf_clock_probe: true
# -- Whether to enable CNP status updates.
# cilium_disable_cnp_status_updates: true
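Taken together, these options are typically overridden in the cluster's group_vars rather than edited in place. A minimal, illustrative sketch (the file path and the chosen values are assumptions; only the variable names are taken from the defaults above):
# hypothetical inventory/mycluster/group_vars/k8s_cluster/k8s-net-cilium.yml
cilium_encryption_enabled: true          # turn on transparent encryption
cilium_encryption_type: "wireguard"      # use WireGuard instead of the default IPsec
cilium_enable_hubble: true               # enable Hubble observability in the agent
cilium_enable_hubble_metrics: true
cilium_hubble_metrics:
  - dns
  - drop
  - http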

View file

@ -1,18 +0,0 @@
# see roles/network_plugin/flannel/defaults/main.yml
## interface that should be used for flannel operations
## This is actually an inventory cluster-level item
# flannel_interface:
## Select interface that should be used for flannel operations by regexp on Name or IP
## This is actually an inventory cluster-level item
## example: select interface with ip from net 10.0.0.0/23
## single quote and escape backslashes
# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard'
# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md
# flannel_backend_type: "vxlan"
# flannel_vxlan_vni: 1
# flannel_vxlan_port: 8472
# flannel_vxlan_direct_routing: false
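As an illustration only, switching flannel to the wireguard backend and selecting the interface by regexp could be expressed with the variables above (the values are placeholders):
flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'   # select interface by IP, single-quoted, backslashes escaped
flannel_backend_type: "wireguard"                        # instead of the default vxlan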

View file

@ -1,57 +0,0 @@
---
# geneve or vlan
kube_ovn_network_type: geneve
# geneve, vxlan or stt. ATTENTION: some network policies cannot take effect when using vxlan, and stt needs a custom-compiled OVS kernel module
kube_ovn_tunnel_type: geneve
## The NIC that carries the container network can be a NIC name or a comma-separated group of regexes, e.g. 'enp6s0f0,eth.*'; if empty, the NIC used by the default route will be used.
# kube_ovn_iface: eth1
## The MTU used by pod iface in overlay networks (default iface MTU - 100)
# kube_ovn_mtu: 1333
## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bound to the physical port.
kube_ovn_hw_offload: false
# traffic mirror
kube_ovn_traffic_mirror: false
# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
# kube_ovn_default_interface_name: eth0
kube_ovn_external_address: 8.8.8.8
kube_ovn_external_address_ipv6: 2400:3200::1
kube_ovn_external_dns: alauda.cn
# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0
kube_ovn_default_gateway_check: true
kube_ovn_default_logical_gateway: false
# kube_ovn_default_exclude_ips: 10.16.0.1
kube_ovn_node_switch_cidr: 100.64.0.0/16
kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64
## vlan config, set default interface name and vlan id
# kube_ovn_default_interface_name: eth0
kube_ovn_default_vlan_id: 100
kube_ovn_vlan_name: product
## pod nic type, support: veth-pair or internal-port
kube_ovn_pod_nic_type: veth_pair
## Enable load balancer
kube_ovn_enable_lb: true
## Enable network policy support
kube_ovn_enable_np: true
## Enable external vpc support
kube_ovn_enable_external_vpc: true
## Enable checksum
kube_ovn_encap_checksum: true
## enable ssl
kube_ovn_enable_ssl: false
## dpdk
kube_ovn_dpdk_enabled: false
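A rough sketch of a group_vars override for these kube-ovn defaults might look like the following (all values are assumptions; the variable names come from the file above):
kube_ovn_tunnel_type: vxlan      # instead of the default geneve
kube_ovn_iface: eth1             # NIC carrying the container network
kube_ovn_mtu: 1400               # pod iface MTU in overlay networks
kube_ovn_enable_ssl: true        # enable ssl between kube-ovn components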

View file

@ -1,64 +0,0 @@
# See roles/network_plugin/kube-router//defaults/main.yml
# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP
# kube_router_run_router: true
# Enables Network Policy -- sets up iptables to provide ingress firewall for pods
# kube_router_run_firewall: true
# Enables Service Proxy -- sets up IPVS for Kubernetes Services
# see docs/kube-router.md "Caveats" section
# kube_router_run_service_proxy: false
# Add the Cluster IP of the service to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_cluster_ip: false
# Add External IP of service to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_external_ip: false
# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_loadbalancer_ip: false
# Adjust the kube-router daemonset manifest template with the changes needed for DSR
# kube_router_enable_dsr: false
# Array of arbitrary extra arguments to kube-router, see
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
# kube_router_extra_args: []
# ASN number of the cluster, used when communicating with external BGP routers
# kube_router_cluster_asn: ~
# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr.
# kube_router_peer_router_asns: ~
# The IP address of the external router to which all nodes will peer and advertise the cluster IP and pod CIDRs.
# kube_router_peer_router_ips: ~
# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used.
# kube_router_peer_router_ports: ~
# Sets up node CNI to allow hairpin mode; requires node reboots, see
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode
# kube_router_support_hairpin_mode: false
# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc.
# kube_router_dns_policy: ClusterFirstWithHostNet
# Array of annotations for master
# kube_router_annotations_master: []
# Array of annotations for every node
# kube_router_annotations_node: []
# Array of common annotations for every node
# kube_router_annotations_all: []
# Enables scraping kube-router metrics with Prometheus
# kube_router_enable_metrics: false
# Path to serve Prometheus metrics on
# kube_router_metrics_path: /metrics
# Prometheus metrics port to use
# kube_router_metrics_port: 9255
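For example, peering every node with a single external BGP router could be sketched with the variables documented above (the ASNs and the peer address are placeholders):
kube_router_run_router: true
kube_router_cluster_asn: 64512
kube_router_peer_router_asns: 64513
kube_router_peer_router_ips: 192.0.2.1
kube_router_advertise_cluster_ip: true   # advertise Cluster IPs to the BGP peer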

View file

@ -1,6 +0,0 @@
---
# private interface, on an L2 network
macvlan_interface: "eth1"
# Enable NAT on the default gateway network interface
enable_nat_default_gateway: true

View file

@ -1,64 +0,0 @@
# see roles/network_plugin/weave/defaults/main.yml
# Weave's network password for encryption, if null then no network encryption.
# weave_password: ~
# If set to 1, disable checking for new Weave Net versions (default is blank,
# i.e. check is enabled)
# weave_checkpoint_disable: false
# Soft limit on the number of connections between peers. Defaults to 100.
# weave_conn_limit: 100
# Weave Net defaults to enabling hairpin on the bridge side of the veth pair
# for containers attached. If you need to disable hairpin, e.g. your kernel is
# one of those that can panic if hairpin is enabled, then you can disable it by
# setting `HAIRPIN_MODE=false`.
# weave_hairpin_mode: true
# The range of IP addresses used by Weave Net and the subnet they are placed in
# (CIDR format; default 10.32.0.0/12)
# weave_ipalloc_range: "{{ kube_pods_subnet }}"
# Set to 0 to disable Network Policy Controller (default is on)
# weave_expect_npc: "{{ enable_network_policy }}"
# List of addresses of peers in the Kubernetes cluster (default is to fetch the
# list from the api-server)
# weave_kube_peers: ~
# Set the initialization mode of the IP Address Manager (defaults to consensus
# amongst the KUBE_PEERS)
# weave_ipalloc_init: ~
# Set the IP address used as a gateway from the Weave network to the host
# network - this is useful if you are configuring the addon as a static pod.
# weave_expose_ip: ~
# Address and port that the Weave Net daemon will serve Prometheus-style
# metrics on (defaults to 0.0.0.0:6782)
# weave_metrics_addr: ~
# Address and port that the Weave Net daemon will serve status requests on
# (defaults to disabled)
# weave_status_addr: ~
# Weave Net defaults to 1376 bytes, but you can set a smaller size if your
# underlying network has a tighter limit, or set a larger size for better
# performance if your network supports jumbo frames (e.g. 8916)
# weave_mtu: 1376
# Set to 1 to preserve the client source IP address when accessing Service
# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works
# only with Weave IPAM (default).
# weave_no_masq_local: true
# set to nft to use nftables backend for iptables (default is iptables)
# weave_iptables_backend: iptables
# Extra variables that are passed to launch.sh, useful for enabling seed mode, see
# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
# weave_extra_args: ~
# Extra variables for weave_npc that are passed to launch.sh, useful for changing the log level, e.g. --log-level=error
# weave_npc_extra_args: ~
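As an illustrative sketch only, enabling network encryption and preserving the client source IP for services annotated with externalTrafficPolicy=Local could be written with the variables above (the password is a placeholder secret):
weave_password: "ChangeMeToAStrongSecret"   # placeholder; if null, traffic is unencrypted
weave_no_masq_local: true                   # keep client source IPs (works with Weave IPAM)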

View file

@ -1,41 +0,0 @@
# ## Configure 'ip' variable to bind kubernetes services on a
# ## different ip than the default iface
# ## etcd_member_name should be set for the etcd cluster. Nodes that are not etcd members do not need to set the value, or can set it to an empty string.
[all]
vig-k8s-e01 ansible_host=192.168.30.35 etcd_member_name=etcd01
vig-k8s-e02 ansible_host=192.168.30.36 etcd_member_name=etcd02
vig-k8s-e03 ansible_host=192.168.30.37 etcd_member_name=etcd03
vig-k8s-c01 ansible_host=192.168.30.38
vig-k8s-c02 ansible_host=192.168.30.39
vig-k8s-c03 ansible_host=192.168.30.40
vig-k8s-w01 ansible_host=192.168.30.41
vig-k8s-w02 ansible_host=192.168.30.42
vig-k8s-w03 ansible_host=192.168.30.43
# node6 ansible_host=95.54.0.17 # ip=10.3.0.6 etcd_member_name=etcd6
# ## configure a bastion host if your nodes are not directly reachable
# [bastion]
# bastion ansible_host=x.x.x.x ansible_user=some_user
[kube_control_plane]
vig-k8s-c01
vig-k8s-c02
vig-k8s-c03
[etcd]
vig-k8s-e01
vig-k8s-e02
vig-k8s-e03
[kube_node]
vig-k8s-w01
vig-k8s-w02
vig-k8s-w03
[calico_rr]
[k8s_cluster:children]
kube_control_plane
kube_node
calico_rr
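The same hosts can also be expressed in Ansible's YAML inventory format; a partial, illustrative sketch covering one node per group (names and addresses copied from the INI inventory above):
all:
  hosts:
    vig-k8s-e01:
      ansible_host: 192.168.30.35
      etcd_member_name: etcd01
    vig-k8s-c01:
      ansible_host: 192.168.30.38
    vig-k8s-w01:
      ansible_host: 192.168.30.41
  children:
    kube_control_plane:
      hosts:
        vig-k8s-c01:
    etcd:
      hosts:
        vig-k8s-e01:
    kube_node:
      hosts:
        vig-k8s-w01:
    calico_rr:
      hosts: {}
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
        calico_rr: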

View file

@ -1,8 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
name: kube-controller-manager
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '10257'

View file

@ -1,8 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
name: kube-scheduler
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '10259'

View file

@ -35,11 +35,6 @@ loadbalancer_apiserver_healthcheck_port: 8081
### OTHER OPTIONAL VARIABLES ### OTHER OPTIONAL VARIABLES
## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries.
## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage. However, it uses the nameservers to make sure the cluster is installed safely in the dns_early stage.
## Use this option with caution; you may need to define your DNS servers. Otherwise, outbound queries such as www.google.com may fail.
# disable_host_nameservers: false
## Upstream dns servers ## Upstream dns servers
# upstream_dns_servers: # upstream_dns_servers:
# - 8.8.8.8 # - 8.8.8.8
@ -135,6 +130,3 @@ ntp_servers:
- "1.pool.ntp.org iburst" - "1.pool.ntp.org iburst"
- "2.pool.ntp.org iburst" - "2.pool.ntp.org iburst"
- "3.pool.ntp.org iburst" - "3.pool.ntp.org iburst"
## Used to control no_log attribute
unsafe_show_logs: false

View file

@ -37,9 +37,6 @@
# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore # [Optional] Calico with kdd: If using Calico network plugin with kdd datastore
# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" # calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz"
# [Optional] Cilium: If using Cilium network plugin
# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz"
# [Optional] Flannel: If using Falnnel network plugin # [Optional] Flannel: If using Falnnel network plugin
# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" # flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}"
@ -64,10 +61,6 @@
# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" # containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" # nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true
# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc"
# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1"
## CentOS/Redhat/AlmaLinux ## CentOS/Redhat/AlmaLinux
### For EL7, base and extras repo must be available, for EL8, baseos and appstream ### For EL7, base and extras repo must be available, for EL8, baseos and appstream
### By default we enable those repo automatically ### By default we enable those repo automatically
@ -89,8 +82,8 @@
# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" # docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" # docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
### Containerd ### Containerd
# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" # containerd_debian_repo_base_url: "{{ ubuntu_repo }}/containerd"
# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" # containerd_debian_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
# containerd_debian_repo_repokey: 'YOURREPOKEY' # containerd_debian_repo_repokey: 'YOURREPOKEY'
## Ubuntu ## Ubuntu

View file

@ -7,18 +7,13 @@
# upcloud_csi_provisioner_image_tag: "v3.1.0" # upcloud_csi_provisioner_image_tag: "v3.1.0"
# upcloud_csi_attacher_image_tag: "v3.4.0" # upcloud_csi_attacher_image_tag: "v3.4.0"
# upcloud_csi_resizer_image_tag: "v1.4.0" # upcloud_csi_resizer_image_tag: "v1.4.0"
# upcloud_csi_plugin_image_tag: "v0.3.3" # upcloud_csi_plugin_image_tag: "v0.2.1"
# upcloud_csi_node_image_tag: "v2.5.0" # upcloud_csi_node_image_tag: "v2.5.0"
# upcloud_tolerations: [] # upcloud_tolerations: []
## Storage class options ## Storage class options
# expand_persistent_volumes: true
# parameters:
# tier: maxiops # or hdd
# storage_classes: # storage_classes:
# - name: standard # - name: standard
# is_default: true # is_default: true
# expand_persistent_volumes: true
# parameters:
# tier: maxiops
# - name: hdd
# is_default: false
# expand_persistent_volumes: true
# parameters:
# tier: hdd

View file

@ -18,8 +18,6 @@ metrics_server_enabled: false
# metrics_server_kubelet_insecure_tls: true # metrics_server_kubelet_insecure_tls: true
# metrics_server_metric_resolution: 15s # metrics_server_metric_resolution: 15s
# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" # metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname"
# metrics_server_host_network: false
# metrics_server_replicas: 1
# Rancher Local Path Provisioner # Rancher Local Path Provisioner
local_path_provisioner_enabled: false local_path_provisioner_enabled: false
@ -163,12 +161,11 @@ cert_manager_enabled: false
# MetalLB deployment # MetalLB deployment
metallb_enabled: false metallb_enabled: false
metallb_speaker_enabled: "{{ metallb_enabled }}" metallb_speaker_enabled: true
# metallb_ip_range: # metallb_ip_range:
# - "10.5.0.50-10.5.0.99" # - "10.5.0.50-10.5.0.99"
# metallb_pool_name: "loadbalanced" # metallb_pool_name: "loadbalanced"
# metallb_auto_assign: true # metallb_auto_assign: true
# metallb_avoid_buggy_ips: false
# metallb_speaker_nodeselector: # metallb_speaker_nodeselector:
# kubernetes.io/os: "linux" # kubernetes.io/os: "linux"
# metallb_controller_nodeselector: # metallb_controller_nodeselector:
@ -201,7 +198,6 @@ metallb_speaker_enabled: "{{ metallb_enabled }}"
# - "10.5.1.50-10.5.1.99" # - "10.5.1.50-10.5.1.99"
# protocol: "layer2" # protocol: "layer2"
# auto_assign: false # auto_assign: false
# avoid_buggy_ips: false
# metallb_protocol: "bgp" # metallb_protocol: "bgp"
# metallb_peers: # metallb_peers:
# - peer_address: 192.0.2.1 # - peer_address: 192.0.2.1
@ -211,8 +207,9 @@ metallb_speaker_enabled: "{{ metallb_enabled }}"
# peer_asn: 64513 # peer_asn: 64513
# my_asn: 4200000000 # my_asn: 4200000000
argocd_enabled: false argocd_enabled: false
# argocd_version: v2.4.16 # argocd_version: v2.4.7
# argocd_namespace: argocd # argocd_namespace: argocd
# Default password: # Default password:
# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli # - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli
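A hedged layer-2 sketch built from the commented MetalLB variables in this hunk (the address range is the sample one shown above; the k8s-cluster.yml hunk further down also notes that kube_proxy_strict_arp must be set to true for MetalLB to work):
metallb_enabled: true
metallb_ip_range:
  - "10.5.0.50-10.5.0.99"
metallb_protocol: "layer2"
kube_proxy_strict_arp: true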

View file

@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true kube_api_anonymous_auth: true
## Change this to use another Kubernetes version, e.g. a current beta release ## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.25.5 kube_version: v1.24.3
# Where the binaries will be downloaded. # Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G) # Note: ensure that you've enough disk space (about 1G)
@ -125,7 +125,7 @@ kube_apiserver_port: 6443 # (https)
kube_proxy_mode: ipvs kube_proxy_mode: ipvs
# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface # configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
# must be set to true for MetalLB, kube-vip(ARP enabled) to work # must be set to true for MetalLB to work
kube_proxy_strict_arp: false kube_proxy_strict_arp: false
# A string slice of values which specify the addresses to use for NodePorts. # A string slice of values which specify the addresses to use for NodePorts.
@ -160,14 +160,6 @@ kube_encrypt_secret_data: false
cluster_name: cluster.local cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods # Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2 ndots: 2
# dns_timeout: 2
# dns_attempts: 2
# Custom search domains to be added in addition to the default cluster search domains
# searchdomains:
# - svc.{{ cluster_name }}
# - default.svc.{{ cluster_name }}
# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
# remove_default_searchdomains: false
# Can be coredns, coredns_dual, manual or none # Can be coredns, coredns_dual, manual or none
dns_mode: coredns dns_mode: coredns
# Set manual server if using a custom cluster DNS server # Set manual server if using a custom cluster DNS server
@ -193,21 +185,11 @@ nodelocaldns_secondary_skew_seconds: 5
# nameservers: # nameservers:
# - 192.168.0.53 # - 192.168.0.53
# cache: 0 # cache: 0
# - zones:
# - mydomain.tld
# nameservers:
# - 10.233.0.3
# cache: 5
# rewrite:
# - name website.tld website.namespace.svc.cluster.local
# Enable k8s_external plugin for CoreDNS # Enable k8s_external plugin for CoreDNS
enable_coredns_k8s_external: false enable_coredns_k8s_external: false
coredns_k8s_external_zone: k8s_external.local coredns_k8s_external_zone: k8s_external.local
# Enable endpoint_pod_names option for kubernetes plugin # Enable endpoint_pod_names option for kubernetes plugin
enable_coredns_k8s_endpoint_pod_names: false enable_coredns_k8s_endpoint_pod_names: false
# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config
# dns_upstream_forward_extra_opts:
# policy: sequential
# Can be docker_dns, host_resolvconf or none # Can be docker_dns, host_resolvconf or none
resolvconf_mode: host_resolvconf resolvconf_mode: host_resolvconf
@ -342,9 +324,3 @@ event_ttl_duration: "1h0m0s"
auto_renew_certificates: false auto_renew_certificates: false
# First Monday of each month # First Monday of each month
# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" # auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00"
# kubeadm patches path
kubeadm_patches:
enabled: false
source_dir: "{{ inventory_dir }}/patches"
dest_dir: "{{ kube_config_dir }}/patches"
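A sketch of enabling the kubeadm_patches block added at the end of this hunk, reusing the default paths shown above (the example patch file name is hypothetical and follows kubeadm's patch naming convention):
kubeadm_patches:
  enabled: true
  source_dir: "{{ inventory_dir }}/patches"
  dest_dir: "{{ kube_config_dir }}/patches"
# e.g. a file such as kube-apiserver0+merge.yaml placed in source_dir is expected to be
# copied to dest_dir and picked up by kubeadm's patches mechanism for the static pod manifests.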

View file

@ -60,7 +60,7 @@ calico_pool_blocksize: 26
# - x.x.x.x/24 # - x.x.x.x/24
# - y.y.y.y/32 # - y.y.y.y/32
# Advertise Service LoadBalancer IPs # Adveritse Service LoadBalancer IPs
# calico_advertise_service_loadbalancer_ips: # calico_advertise_service_loadbalancer_ips:
# - x.x.x.x/24 # - x.x.x.x/24
# - y.y.y.y/16 # - y.y.y.y/16
@ -99,7 +99,7 @@ calico_pool_blocksize: 26
# calico_vxlan_vni: 4096 # calico_vxlan_vni: 4096
# calico_vxlan_port: 4789 # calico_vxlan_port: 4789
# Enable eBPF mode # Cenable eBPF mode
# calico_bpf_enabled: false # calico_bpf_enabled: false
# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: # If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of:
@ -109,10 +109,6 @@ calico_pool_blocksize: 26
# calico_ip_auto_method: "interface=eth.*" # calico_ip_auto_method: "interface=eth.*"
# calico_ip6_auto_method: "interface=eth.*" # calico_ip6_auto_method: "interface=eth.*"
# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the hosts interface for MTU auto-detection.
# see https://projectcalico.docs.tigera.io/reference/felix/configuration
# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
# Choose the iptables insert mode for Calico: "Insert" or "Append". # Choose the iptables insert mode for Calico: "Insert" or "Append".
# calico_felix_chaininsertmode: Insert # calico_felix_chaininsertmode: Insert
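As a sketch, overriding the autodetection-related variables touched in this hunk might look like the following (the interface regex is the sample one shown above; whether it fits depends on the host's NIC naming):
calico_ip_auto_method: "interface=eth.*"
calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"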

View file

@ -1,5 +1,5 @@
--- ---
# cilium_version: "v1.12.1" # cilium_version: "v1.11.7"
# Log-level # Log-level
# cilium_debug: false # cilium_debug: false
@ -118,7 +118,6 @@
# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ # https://docs.cilium.io/en/stable/concepts/networking/masquerading/
# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded # By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded
# cilium_ip_masq_agent_enable: false # cilium_ip_masq_agent_enable: false
### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded ### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded
# cilium_non_masquerade_cidrs: # cilium_non_masquerade_cidrs:
# - 10.0.0.0/8 # - 10.0.0.0/8

View file

@ -10,9 +10,9 @@
## single quote and escape backslashes ## single quote and escape backslashes
# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' # flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' # You can choose what type of flannel backend to use: 'vxlan' or 'host-gw'
# for experimental backend
# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md # please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md
# flannel_backend_type: "vxlan" # flannel_backend_type: "vxlan"
# flannel_vxlan_vni: 1 # flannel_vxlan_vni: 1
# flannel_vxlan_port: 8472 # flannel_vxlan_port: 8472
# flannel_vxlan_direct_routing: false

View file

@ -16,7 +16,7 @@
# Add External IP of service to the RIB so that it gets advertised to the BGP peers. # Add External IP of service to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_external_ip: false # kube_router_advertise_external_ip: false
# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. # Add LoadbBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_loadbalancer_ip: false # kube_router_advertise_loadbalancer_ip: false
# Adjust manifest of kube-router daemonset template with DSR needed changes # Adjust manifest of kube-router daemonset template with DSR needed changes

View file

@ -1,8 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
name: kube-controller-manager
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '10257'

View file

@ -1,8 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
name: kube-scheduler
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '10259'

View file

@ -24,7 +24,7 @@
- { role: kubespray-defaults} - { role: kubespray-defaults}
- { role: recover_control_plane/control-plane } - { role: recover_control_plane/control-plane }
- import_playbook: cluster.yml - include: cluster.yml
- hosts: kube_control_plane - hosts: kube_control_plane
environment: "{{ proxy_disable_env }}" environment: "{{ proxy_disable_env }}"

View file

@ -6,5 +6,5 @@ netaddr==0.7.19
pbr==5.4.4 pbr==5.4.4
jmespath==0.9.5 jmespath==0.9.5
ruamel.yaml==0.16.10 ruamel.yaml==0.16.10
ruamel.yaml.clib==0.2.7 ruamel.yaml.clib==0.2.6
MarkupSafe==1.1.1 MarkupSafe==1.1.1

View file

@ -6,5 +6,5 @@ netaddr==0.7.19
pbr==5.4.4 pbr==5.4.4
jmespath==0.9.5 jmespath==0.9.5
ruamel.yaml==0.16.10 ruamel.yaml==0.16.10
ruamel.yaml.clib==0.2.7 ruamel.yaml.clib==0.2.6
MarkupSafe==1.1.1 MarkupSafe==1.1.1

View file

@ -7,13 +7,13 @@ addusers:
etcd: etcd:
name: etcd name: etcd
comment: "Etcd user" comment: "Etcd user"
create_home: no createhome: no
system: yes system: yes
shell: /sbin/nologin shell: /sbin/nologin
kube: kube:
name: kube name: kube
comment: "Kubernetes user" comment: "Kubernetes user"
create_home: no createhome: no
system: yes system: yes
shell: /sbin/nologin shell: /sbin/nologin
group: "{{ kube_cert_group }}" group: "{{ kube_cert_group }}"
@ -24,4 +24,4 @@ adduser:
comment: "{{ user.comment|default(None) }}" comment: "{{ user.comment|default(None) }}"
shell: "{{ user.shell|default(None) }}" shell: "{{ user.shell|default(None) }}"
system: "{{ user.system|default(None) }}" system: "{{ user.system|default(None) }}"
create_home: "{{ user.create_home|default(None) }}" createhome: "{{ user.createhome|default(None) }}"

View file

@ -7,10 +7,10 @@
- name: User | Create User - name: User | Create User
user: user:
comment: "{{ user.comment|default(omit) }}" comment: "{{ user.comment|default(omit) }}"
create_home: "{{ user.create_home|default(omit) }}" createhome: "{{ user.createhome|default(omit) }}"
group: "{{ user.group|default(user.name) }}" group: "{{ user.group|default(user.name) }}"
home: "{{ user.home|default(omit) }}" home: "{{ user.home|default(omit) }}"
shell: "{{ user.shell|default(omit) }}" shell: "{{ user.shell|default(omit) }}"
name: "{{ user.name }}" name: "{{ user.name }}"
system: "{{ user.system|default(omit) }}" system: "{{ user.system|default(omit) }}"
when: user.name != "root" when: kube_owner != "root"

View file

@ -5,4 +5,4 @@ addusers:
shell: /sbin/nologin shell: /sbin/nologin
system: yes system: yes
group: "{{ kube_cert_group }}" group: "{{ kube_cert_group }}"
create_home: no createhome: no

View file

@ -2,14 +2,14 @@
addusers: addusers:
- name: etcd - name: etcd
comment: "Etcd user" comment: "Etcd user"
create_home: yes createhome: yes
home: "{{ etcd_data_dir }}" home: "{{ etcd_data_dir }}"
system: yes system: yes
shell: /sbin/nologin shell: /sbin/nologin
- name: kube - name: kube
comment: "Kubernetes user" comment: "Kubernetes user"
create_home: no createhome: no
system: yes system: yes
shell: /sbin/nologin shell: /sbin/nologin
group: "{{ kube_cert_group }}" group: "{{ kube_cert_group }}"

View file

@ -2,14 +2,14 @@
addusers: addusers:
- name: etcd - name: etcd
comment: "Etcd user" comment: "Etcd user"
create_home: yes createhome: yes
home: "{{ etcd_data_dir }}" home: "{{ etcd_data_dir }}"
system: yes system: yes
shell: /sbin/nologin shell: /sbin/nologin
- name: kube - name: kube
comment: "Kubernetes user" comment: "Kubernetes user"
create_home: no createhome: no
system: yes system: yes
shell: /sbin/nologin shell: /sbin/nologin
group: "{{ kube_cert_group }}" group: "{{ kube_cert_group }}"

View file

@ -25,8 +25,3 @@ override_system_hostname: true
is_fedora_coreos: false is_fedora_coreos: false
skip_http_proxy_on_os_packages: false skip_http_proxy_on_os_packages: false
# If this is true, debug information will be displayed but
# may contain some private data, so it is recommended to set it to false
# in the production environment.
unsafe_show_logs: false

View file

@ -84,7 +84,6 @@
- use_oracle_public_repo|default(true) - use_oracle_public_repo|default(true)
- '''ID="ol"'' in os_release.stdout_lines' - '''ID="ol"'' in os_release.stdout_lines'
- (ansible_distribution_version | float) >= 7.6 - (ansible_distribution_version | float) >= 7.6
- (ansible_distribution_version | float) < 9
# CentOS ships with python installed # CentOS ships with python installed

View file

@ -1,9 +1,5 @@
--- ---
# OpenSUSE ships with Python installed # OpenSUSE ships with Python installed
- name: Gather host facts to get ansible_distribution_version ansible_distribution_major_version
setup:
gather_subset: '!all'
filter: ansible_distribution_*version
- name: Check that /etc/sysconfig/proxy file exists - name: Check that /etc/sysconfig/proxy file exists
stat: stat:
@ -63,17 +59,6 @@
state: present state: present
update_cache: true update_cache: true
become: true become: true
when:
- ansible_distribution_version is version('15.4', '<')
- name: Install python3-cryptography
zypper:
name: python3-cryptography
state: present
update_cache: true
become: true
when:
- ansible_distribution_version is version('15.4', '>=')
# Nerdctl needs some basic packages to get an environment up # Nerdctl needs some basic packages to get an environment up
- name: Install basic dependencies - name: Install basic dependencies

View file

@ -65,7 +65,7 @@
notify: RHEL auto-attach subscription notify: RHEL auto-attach subscription
ignore_errors: true # noqa ignore-errors ignore_errors: true # noqa ignore-errors
become: true become: true
no_log: "{{ not (unsafe_show_logs|bool) }}" no_log: true
when: when:
- rh_subscription_username is defined - rh_subscription_username is defined
- rh_subscription_status.changed - rh_subscription_status.changed

Some files were not shown because too many files have changed in this diff.