Compare commits


12 commits

Author SHA1 Message Date
Jeroen b75ee0b111
Define ostree variable for runc (#9417)
The ostree variable was not previously defined, raising an error when
the runtime tries to read it.

Co-authored-by: Victor Morales <chipahuac@hotmail.com>
2022-10-23 21:22:25 -07:00
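A minimal sketch of the described fix, assuming the variable only needs a safe default in the runc role (the exact file and value used in #9417 may differ):

```yaml
# Hypothetical defaults entry: give `ostree` a value so tasks that read it
# no longer fail with "'ostree' is undefined" on hosts without rpm-ostree.
ostree: false
```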
Zhong Jianxin 63e3f4dea9
[2.19] preinstall: Add nodelocaldns to supersede_nameserver if enabled (#9324)
When a machine that uses dhclient and resolvconf reboots, this keeps
/etc/resolv.conf close to the one in place before the reboot.
2022-09-27 00:07:51 -07:00
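A hedged sketch of the idea: when nodelocaldns is enabled, its listen address is added to the nameserver list that dhclient is told to supersede with, so a reboot does not rewrite /etc/resolv.conf without it (variable names are assumed, not copied from #9324):

```yaml
# Hypothetical preinstall fact: prepend the nodelocaldns IP when the feature is on.
- name: Build the dhclient supersede nameserver list
  set_fact:
    supersede_nameserver: >-
      supersede domain-name-servers
      {{ (([nodelocaldns_ip] if enable_nodelocaldns else []) + coredns_server | d([]) + nameservers | d([])) | unique | join(', ') }};
```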
ERIK 1026b5974f
Update kubespray version to v2.19.1 (#9241) (#9253)
Signed-off-by: bo.jiang <bo.jiang@daocloud.io>
2022-09-05 18:48:55 -07:00
Cristian Calin 453dbcef1d
disable kubelet_authorization_mode_webhook by default (#9239) 2022-08-31 02:55:00 -07:00
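The change is a default flip; expressed as an inventory override it would look like the sketch below (the variable name comes from the commit title, the file path is illustrative):

```yaml
# group_vars/k8s_cluster/k8s-cluster.yml — kubelet webhook authorization is now
# off unless explicitly enabled.
kubelet_authorization_mode_webhook: false
```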
Chad Swenson 4a6600002f
Fixes for calico etcd mode (2.19 backport) (#9234)
release-2.19 backport of #9228

It seems that PR #8839 broke `calico_datastore: etcd` when it removed ipamconfig support for etcd mode.

This PR fixes some failing tasks when `calico_datastore == etcd`, but it does not restore ipamconfig support for calico in etcd mode. If someone wants to restore ipamconfig support for `calico_datastore: etcd`, please submit a follow-up PR for that.
2022-08-30 10:02:55 -07:00
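For context, the affected mode is selected in the inventory; a minimal sketch (the default datastore in recent Kubespray releases is `kdd`):

```yaml
# group_vars/k8s_cluster/k8s-net-calico.yml — opt into the etcd datastore
calico_datastore: etcd
```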
Sergey 6eb313584e
do not run etcd role in scale.yml playbook when etcd installed by kubeadm (#9210) (#9216) 2022-08-24 09:08:03 -07:00
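The same guard appears in the `cluster.yml` hunk later in this diff; a sketch of the play-level condition (shape assumed, not quoted from #9210):

```yaml
# scale.yml sketch: only run the etcd role when etcd is not managed by kubeadm
- hosts: etcd
  roles:
    - role: etcd
      when: etcd_deployment_type != "kubeadm"
```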
Kenichi Omichi a270632466
Allow "openSUSE Tumbleweed" to be run (#9072) (#9082)
The commit 1ce2f04 tried to merge multiple SUSE OS checks, including
"openSUSE Leap" and "openSUSE Tumbleweed", into a single SUSE check, but
that change was incomplete.
The commit c16efc9 then tried to fix it for "openSUSE Leap", but it
didn't take care of "openSUSE Tumbleweed".
This commit adds "openSUSE Tumbleweed" to the OS check.
2022-07-12 00:24:51 -07:00
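A hedged sketch of the resulting check; the real assert in the preinstall role lists many more distributions, and the task shape here is illustrative:

```yaml
- name: Check the target distribution is supported
  assert:
    that:
      - ansible_distribution in supported_distributions
  vars:
    supported_distributions:
      - openSUSE Leap
      - openSUSE Tumbleweed
      - Ubuntu
      - Debian
      - CentOS
```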
Sébastien Masset 00550ba832
[2.19] Add missing configuration for extra tolerations (#8999)
* Added new configuration item for extra tolerations in policy controllers

Signed-off-by: Sébastien Masset <smt.masset@gmail.com>

* Added new configuration item for extra tolerations in DNS autoscaler

Signed-off-by: Sébastien Masset <smt.masset@gmail.com>

* Aligned existing handling of extra DNS tolerations

Signed-off-by: Sébastien Masset <smt.masset@gmail.com>
2022-06-23 01:41:44 -07:00
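Illustrative inventory overrides for the new items; the variable names are assumed from the PR description and may not match #8999 exactly:

```yaml
# Extra tolerations for the network policy controller and the DNS autoscaler
calico_policy_controller_extra_tolerations:
  - key: node-role.kubernetes.io/control-plane
    effect: NoSchedule
dns_autoscaler_extra_tolerations:
  - key: node-role.kubernetes.io/control-plane
    effect: NoSchedule
```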
Kenichi Omichi b4951da405
calicoctl repo has been merged in calico (#8920) (#8972)
Co-authored-by: Mathieu Parent <mathieu.parent@insee.fr>
2022-06-14 02:07:33 -07:00
Chinthiti Wisetsombat cd93d10688
Fix: set fallback value of kubelet ip6 (#8858) (#8926) (#8942)
* Fix: set fallback value of kubelet ip6 (#8858)

* Prune the spurious comma at the end of kubelet_address

- Update `roles/kubernetes/node/defaults/main.yml`

Co-authored-by: Cristian Calin <6627509+cristicalin@users.noreply.github.com>

* Fix: set fallback value of kubelet ip6 (#8858)

- Apply the lint: 132606368e

Co-authored-by: Cristian Calin <6627509+cristicalin@users.noreply.github.com>

Co-authored-by: Ho Kim <ho.kim@ulagbulag.io>
Co-authored-by: Cristian Calin <6627509+cristicalin@users.noreply.github.com>
2022-06-08 10:08:22 -07:00
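A sketch of the intent in `roles/kubernetes/node/defaults/main.yml`: give the IPv6 part a fallback and only emit the separating comma when dual-stack is enabled, so `kubelet_address` never ends with a trailing comma (expression shape assumed, not quoted from #8858):

```yaml
kubelet_address: "{{ ip | default('0.0.0.0') }}{{ (',' + (ip6 | default('::'))) if enable_dual_stack_networks else '' }}"
```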
ERIK e6940d8a7b
update kubespray image tag in readme to v2.19.0 (#8938)
Signed-off-by: bo.jiang <bo.jiang@daocloud.io>
2022-06-07 09:22:56 -07:00
mahjonp dca5cde493
fix 8893#issuecomment-1147154353 (#8933) (#8939)
Signed-off-by: mahjonp <junpeng.man@gmail.com>
2022-06-07 09:18:56 -07:00
462 changed files with 5333 additions and 14807 deletions

.gitignore vendored

@ -3,10 +3,7 @@
**/vagrant_ansible_inventory
*.iml
temp
contrib/offline/offline-files
contrib/offline/offline-files.tar.gz
.idea
.vscode
.tox
.cache
*.bak
@ -14,19 +11,16 @@ contrib/offline/offline-files.tar.gz
*.tfstate.backup
.terraform/
contrib/terraform/aws/credentials.tfvars
.terraform.lock.hcl
/ssh-bastion.conf
**/*.sw[pon]
*~
vagrant/
plugins/mitogen
deploy.sh
# Ansible inventory
inventory/*
!inventory/local
!inventory/sample
!inventory/c12s-sample
inventory/*/artifacts/
# Byte-compiled / optimized / DLL files
@ -114,4 +108,3 @@ roles/**/molecule/**/__pycache__/
# Temp location used by our scripts
scripts/tmp/
tmp.md


@ -8,7 +8,7 @@ stages:
- deploy-special
variables:
KUBESPRAY_VERSION: v2.20.0
KUBESPRAY_VERSION: v2.18.1
FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
ANSIBLE_FORCE_COLOR: "true"
@ -34,7 +34,7 @@ variables:
RECOVER_CONTROL_PLANE_TEST: "false"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
TERRAFORM_VERSION: 1.0.8
ANSIBLE_MAJOR_VERSION: "2.11"
ANSIBLE_MAJOR_VERSION: "2.10"
before_script:
- ./tests/scripts/rebase.sh


@ -68,20 +68,6 @@ markdownlint:
script:
- markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md
check-readme-versions:
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/check_readme_versions.sh
check-typo:
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/check_typo.sh
ci-matrix:
stage: unit-tests
tags: [light]


@ -44,7 +44,7 @@ molecule_no_container_engines:
molecule_docker:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
- ./tests/scripts/molecule_run.sh -i container-engine/docker
when: on_success
molecule_containerd:
@ -60,6 +60,13 @@ molecule_cri-o:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
when: on_success
molecule_cri-dockerd:
extends: .molecule
stage: deploy-part2
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
when: on_success
# Stage 3 container engines don't get as much attention so allow them to fail
molecule_kata:
extends: .molecule


@ -31,6 +31,23 @@ packet_ubuntu20-calico-aio:
variables:
RESET_CHECK: "true"
# Exercise ansible variants during the nightly jobs
packet_ubuntu20-calico-aio-ansible-2_9:
stage: deploy-part1
extends: .packet_periodic
when: on_success
variables:
ANSIBLE_MAJOR_VERSION: "2.9"
RESET_CHECK: "true"
packet_ubuntu20-calico-aio-ansible-2_10:
stage: deploy-part1
extends: .packet_periodic
when: on_success
variables:
ANSIBLE_MAJOR_VERSION: "2.10"
RESET_CHECK: "true"
packet_ubuntu20-calico-aio-ansible-2_11:
stage: deploy-part1
extends: .packet_periodic
@ -51,26 +68,11 @@ packet_ubuntu20-aio-docker:
extends: .packet_pr
when: on_success
packet_ubuntu20-calico-aio-hardening:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu18-calico-aio:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu22-aio-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu22-calico-aio:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_centos7-flannel-addons-ha:
extends: .packet_pr
stage: deploy-part2
@ -151,22 +153,12 @@ packet_almalinux8-calico:
extends: .packet_pr
when: on_success
packet_rockylinux8-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_rockylinux9-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_almalinux8-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_fedora36-docker-weave:
packet_fedora34-docker-weave:
stage: deploy-part2
extends: .packet_pr
when: on_success
@ -224,19 +216,19 @@ packet_centos7-multus-calico:
extends: .packet_pr
when: manual
packet_centos7-canal-ha:
packet_oracle7-canal-ha:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_fedora36-docker-calico:
packet_fedora35-docker-calico:
stage: deploy-part2
extends: .packet_periodic
when: on_success
variables:
RESET_CHECK: "true"
packet_fedora35-calico-selinux:
packet_fedora34-calico-selinux:
stage: deploy-part2
extends: .packet_periodic
when: on_success
@ -256,7 +248,7 @@ packet_almalinux8-calico-nodelocaldns-secondary:
extends: .packet_pr
when: manual
packet_fedora36-kube-ovn:
packet_fedora34-kube-ovn:
stage: deploy-part2
extends: .packet_periodic
when: on_success
@ -264,7 +256,7 @@ packet_fedora36-kube-ovn:
# ### PR JOBS PART3
# Long jobs (45min+)
packet_centos7-weave-upgrade-ha:
packet_centos7-docker-weave-upgrade-ha:
stage: deploy-part3
extends: .packet_periodic
when: on_success
@ -284,7 +276,7 @@ packet_ubuntu20-calico-ha-wireguard:
extends: .packet_pr
when: manual
packet_debian11-calico-upgrade:
packet_debian10-calico-upgrade:
stage: deploy-part3
extends: .packet_pr
when: on_success
@ -299,12 +291,7 @@ packet_almalinux8-calico-remove-node:
REMOVE_NODE_CHECK: "true"
REMOVE_NODE_NAME: "instance-3"
packet_ubuntu20-calico-etcd-kubeadm:
stage: deploy-part3
extends: .packet_pr
when: on_success
packet_debian11-calico-upgrade-once:
packet_debian10-calico-upgrade-once:
stage: deploy-part3
extends: .packet_periodic
when: on_success


@ -11,6 +11,6 @@ shellcheck:
- cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
- shellcheck --version
script:
# Run shellcheck for all *.sh
- find . -name '*.sh' -not -path './.git/*' | xargs shellcheck --severity error
# Run shellcheck for all *.sh except contrib/
- find . -name '*.sh' -not -path './contrib/*' -not -path './.git/*' | xargs shellcheck --severity error
except: ['triggers', 'master']


@ -43,7 +43,6 @@ vagrant_ubuntu20-flannel:
stage: deploy-part2
extends: .vagrant
when: on_success
allow_failure: false
vagrant_ubuntu16-kube-router-sep:
stage: deploy-part2


@ -1,3 +1,2 @@
---
MD013: false
MD029: false


@ -1,48 +0,0 @@
---
repos:
- repo: https://github.com/adrienverge/yamllint.git
rev: v1.27.1
hooks:
- id: yamllint
args: [--strict]
- repo: https://github.com/markdownlint/markdownlint
rev: v0.11.0
hooks:
- id: markdownlint
args: [ -r, "~MD013,~MD029" ]
exclude: "^.git"
- repo: local
hooks:
- id: ansible-lint
name: ansible-lint
entry: ansible-lint -v
language: python
pass_filenames: false
additional_dependencies:
- .[community]
- id: ansible-syntax-check
name: ansible-syntax-check
entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
language: python
files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"
- id: tox-inventory-builder
name: tox-inventory-builder
entry: bash -c "cd contrib/inventory_builder && tox"
language: python
pass_filenames: false
- id: check-readme-versions
name: check-readme-versions
entry: tests/scripts/check_readme_versions.sh
language: script
pass_filenames: false
- id: ci-matrix
name: ci-matrix
entry: tests/scripts/md-table/test.sh
language: script
pass_filenames: false


@ -16,12 +16,7 @@ pip install -r tests/requirements.txt
#### Linting
Kubespray uses a [pre-commit](https://pre-commit.com) hook configuration to run several linters; please install this tool and use it to run validation tests before submitting a PR.
```ShellSession
pre-commit install
pre-commit run -a # To run pre-commit hook on all files in the repository, even if they were not modified
```
Kubespray uses `yamllint` and `ansible-lint`. To run them locally, use `yamllint .` and `ansible-lint`. It is a good idea to call these tools as part of your pre-commit hook to avoid a lot of back and forth on fixing linting issues (<https://support.gitkraken.com/working-with-repositories/githooksexample/>).
#### Molecule
@ -38,9 +33,7 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster
1. Submit an issue describing your proposed change to the repo in question.
2. The [repo owners](OWNERS) will respond to your issue promptly.
3. Fork the desired repo, develop and test your code changes.
4. Install [pre-commit](https://pre-commit.com) and install it in your development repo.
5. Address any pre-commit validation failures.
6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
7. Submit a pull request.
8. Work with the reviewers on their suggestions.
9. Make sure to rebase onto the HEAD of your target branch and squash unnecessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before the final merge of your contribution.
4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
5. Submit a pull request.
6. Work with the reviewers on their suggestions.
7. Make sure to rebase onto the HEAD of your target branch and squash unnecessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before the final merge of your contribution.


@ -1,5 +1,5 @@
# Use immutable image tags rather than mutable tags (like ubuntu:20.04)
FROM ubuntu:focal-20220531
FROM ubuntu:focal-20220316
ARG ARCH=amd64
ARG TZ=Etc/UTC


@ -8,8 +8,6 @@ aliases:
- floryut
- oomichi
- cristicalin
- liupeng0518
- yankay
kubespray-reviewers:
- holmsten
- bozzo
@ -18,7 +16,6 @@ aliases:
- jayonlau
- cristicalin
- liupeng0518
- yankay
kubespray-emeritus_approvers:
- riverzhang
- atoms


@ -57,11 +57,10 @@ A simple way to ensure you get all the correct version of Ansible is to use the
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
```ShellSession
git checkout v2.20.0
docker pull quay.io/kubespray/kubespray:v2.20.0
docker pull quay.io/kubespray/kubespray:v2.19.1
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
quay.io/kubespray/kubespray:v2.20.0 bash
quay.io/kubespray/kubespray:v2.19.1 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```
@ -112,67 +111,49 @@ vagrant up
- [Adding/replacing a node](docs/nodes.md)
- [Upgrades basics](docs/upgrades.md)
- [Air-Gap installation](docs/offline-environment.md)
- [NTP](docs/ntp.md)
- [Hardening](docs/hardening.md)
- [Mirror](docs/mirror.md)
- [Roadmap](docs/roadmap.md)
## Supported Linux Distributions
- **Flatcar Container Linux by Kinvolk**
- **Debian** Bullseye, Buster, Jessie, Stretch
- **Ubuntu** 16.04, 18.04, 20.04, 22.04
- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
- **Fedora** 35, 36
- **Ubuntu** 16.04, 18.04, 20.04
- **CentOS/RHEL** 7, [8](docs/centos8.md)
- **Fedora** 34, 35
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed
- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
- **Alma Linux** [8, 9](docs/centos.md#centos-8)
- **Rocky Linux** [8, 9](docs/centos.md#centos-8)
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
- **Oracle Linux** 7, [8](docs/centos8.md)
- **Alma Linux** [8](docs/centos8.md)
- **Rocky Linux** [8](docs/centos8.md)
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md))
- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md))
Note: Upstart/SysV init based OS types are not supported.
## Supported Components
- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.5
- [etcd](https://github.com/etcd-io/etcd) v3.5.6
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.23.7
- [etcd](https://github.com/etcd-io/etcd) v3.5.3
- [docker](https://www.docker.com/) v20.10 (see note)
- [containerd](https://containerd.io/) v1.6.14
- [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- [containerd](https://containerd.io/) v1.6.4
- [cri-o](http://cri-o.io/) v1.22 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
- [calico](https://github.com/projectcalico/calico) v3.24.5
- [calico](https://github.com/projectcalico/calico) v3.22.3
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.12.1
- [flannel](https://github.com/flannel-io/flannel) v0.19.2
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
- [cilium](https://github.com/cilium/cilium) v1.11.3
- [flanneld](https://github.com/flannel-io/flannel) v0.17.0
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.2
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.4.0
- [multus](https://github.com/intel/multus-cni) v3.8
- [weave](https://github.com/weaveworks/weave) v2.8.1
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.5
- Application
- [cert-manager](https://github.com/jetstack/cert-manager) v1.10.1
- [coredns](https://github.com/coredns/coredns) v1.9.3
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.5.1
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
- [argocd](https://argoproj.github.io/) v2.4.16
- [helm](https://helm.sh/) v3.9.4
- [metallb](https://metallb.universe.tf/) v0.12.1
- [registry](https://github.com/distribution/distribution) v2.8.1
- Storage Plugin
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
- [cert-manager](https://github.com/jetstack/cert-manager) v1.8.0
- [coredns](https://github.com/coredns/coredns) v1.8.6
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.2.1
## Container Runtime Notes
@ -181,8 +162,8 @@ Note: Upstart/SysV init based OS types are not supported.
## Requirements
- **Minimum required version of Kubernetes is v1.23**
- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- **Minimum required version of Kubernetes is v1.21**
- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding**.
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
@ -250,7 +231,6 @@ See also [Network checker](docs/netcheck.md).
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
- [Kubean](https://github.com/kubean-io/kubean)
## CI Tests


@ -9,10 +9,10 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
5. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
6. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
7. An approver creates a release branch in the form `release-X.Y`
8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
9. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
10. The release issue is closed
11. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
11. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
12. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
## Major/minor releases and milestones
@ -61,23 +61,3 @@ release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --
If the release note file (/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label (`kind/feature`, etc.).
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note.
## Container image creation
The container image `quay.io/kubespray/kubespray:vX.Y.Z` can be created from Dockerfile of the kubespray root directory:
```shell
cd kubespray/
nerdctl build -t quay.io/kubespray/kubespray:vX.Y.Z .
nerdctl push quay.io/kubespray/kubespray:vX.Y.Z
```
The container image `quay.io/kubespray/vagrant:vX.Y.Z` can be created from build.sh of test-infra/vagrant-docker/:
```shell
cd kubespray/test-infra/vagrant-docker/
./build vX.Y.Z
```
Please note that the above operation requires permission to push container images into quay.io/kubespray/.
If you don't have that permission, please ask for it on the #kubespray-dev channel.


@ -9,7 +9,5 @@
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
atoms
mattymo
floryut
oomichi
cristicalin

Vagrantfile vendored

@ -28,10 +28,9 @@ SUPPORTED_OS = {
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
"almalinux8" => {box: "almalinux/8", user: "vagrant"},
"almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
"rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
"fedora34" => {box: "fedora/34-cloud-base", user: "vagrant"},
"fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
"fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
"opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
"opensuse" => {box: "opensuse/Leap-15.3.x86_64", user: "vagrant"},
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},


@ -1,6 +1,6 @@
[ssh_connection]
pipelining=True
ansible_ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
[defaults]
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
@ -10,11 +10,11 @@ host_key_checking=False
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp
fact_caching_timeout = 86400
fact_caching_timeout = 7200
stdout_callback = default
display_skipped_hosts = no
library = ./library
callbacks_enabled = profile_tasks,ara_default
callback_whitelist = profile_tasks,ara_default
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
deprecation_warnings=False
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg


@ -3,20 +3,32 @@
gather_facts: false
become: no
vars:
minimal_ansible_version: 2.11.0
maximal_ansible_version: 2.14.0
minimal_ansible_version: 2.9.0
minimal_ansible_version_2_10: 2.10.11
maximal_ansible_version: 2.13.0
ansible_connection: local
tags: always
tasks:
- name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
assert:
msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }} exclusive"
msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }}"
that:
- ansible_version.string is version(minimal_ansible_version, ">=")
- ansible_version.string is version(maximal_ansible_version, "<")
tags:
- check
- name: "Check Ansible version > {{ minimal_ansible_version_2_10 }} when using ansible 2.10"
assert:
msg: "When using Ansible 2.10, the minimum supported version is {{ minimal_ansible_version_2_10 }}"
that:
- ansible_version.string is version(minimal_ansible_version_2_10, ">=")
- ansible_version.string is version(maximal_ansible_version, "<")
when:
- ansible_version.string is version('2.10.0', ">=")
tags:
- check
- name: "Check that python netaddr is installed"
assert:
msg: "Python netaddr is not present"


@ -35,7 +35,7 @@
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
- { role: download, tags: download, when: "not skip_downloads" }
- hosts: etcd:kube_control_plane
- hosts: etcd
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
@ -59,10 +59,7 @@
vars:
etcd_cluster_setup: false
etcd_events_cluster_setup: false
when:
- etcd_deployment_type != "kubeadm"
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
when: etcd_deployment_type != "kubeadm"
- hosts: k8s_cluster
gather_facts: False


@ -17,7 +17,7 @@ pass_or_fail() {
test_distro() {
local distro=${1:?};shift
local extra="${*:-}"
local prefix="${distro[${extra}]}"
local prefix="$distro[${extra}]}"
ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
pass_or_fail "$prefix: dind-nodes" || return 1
(cd ../..
@ -71,15 +71,15 @@ for spec in ${SPECS}; do
echo "Loading file=${spec} ..."
. ${spec} || continue
: ${DISTROS:?} || continue
echo "DISTROS:" "${DISTROS[@]}"
echo "DISTROS=${DISTROS[@]}"
echo "EXTRAS->"
printf " %s\n" "${EXTRAS[@]}"
let n=1
for distro in "${DISTROS[@]}"; do
for distro in ${DISTROS[@]}; do
for extra in "${EXTRAS[@]:-NULL}"; do
# Magic value to let this for run once:
[[ ${extra} == NULL ]] && unset extra
docker rm -f "${NODES[@]}"
docker rm -f ${NODES[@]}
printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
{
info "${distro}[${extra}] START: file_out=${file_out}"


@ -13,7 +13,7 @@
# under the License.
import inventory
from io import StringIO
from test import support
import unittest
from unittest import mock
@ -41,7 +41,7 @@ class TestInventoryPrintHostnames(unittest.TestCase):
'access_ip': '10.90.0.3'}}}})
with mock.patch('builtins.open', mock_io):
with self.assertRaises(SystemExit) as cm:
with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
with support.captured_stdout() as stdout:
inventory.KubesprayInventory(
changed_hosts=["print_hostnames"],
config_file="file")


@ -14,16 +14,12 @@ This role performs basic installation and setup of Gluster, but it does not conf
Available variables are listed below, along with default values (see `defaults/main.yml`):
```yaml
glusterfs_default_release: ""
```
glusterfs_default_release: ""
You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
```yaml
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.5"
```
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.5"
For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.
@ -33,11 +29,9 @@ None.
## Example Playbook
```yaml
- hosts: server
roles:
- geerlingguy.glusterfs
```
For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).


@ -45,21 +45,3 @@ temp
In some cases you may want to update a component version. You can declare version variables in the ansible inventory file or in group_vars,
then run `./generate_list.sh -i [inventory_file]` to update file.list and images.list.
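A minimal example of such an override, assuming the standard `kube_version` variable (the value is only illustrative):

```yaml
# group_vars/all/offline.yml
kube_version: v1.23.7
```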
## manage-offline-files.sh
This script downloads all files according to `temp/files.list` and runs an nginx container to provide offline file downloads.
Step(1) generate `files.list`
```shell
./generate_list.sh
```
Step(2) download files and run nginx container
```shell
./manage-offline-files.sh
```
When the nginx container is running, it can be accessed at <http://127.0.0.1:8080/>.


@ -15,7 +15,7 @@ function create_container_image_tar() {
IMAGES=$(kubectl describe pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq)
# NOTE: etcd and pause cannot be seen as pods.
# The pause image is used for --pod-infra-container-image option of kubelet.
EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|registry.k8s.io/pause:" | sed s@\"@@g)
EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|k8s.gcr.io/pause:" | sed s@\"@@g)
IMAGES="${IMAGES} ${EXT_IMAGES}"
rm -f ${IMAGE_TAR_FILE}
@ -46,12 +46,12 @@ function create_container_image_tar() {
# NOTE: Here removes the following repo parts from each image
# so that these parts will be replaced with Kubespray.
# - kube_image_repo: "registry.k8s.io"
# - kube_image_repo: "k8s.gcr.io"
# - gcr_image_repo: "gcr.io"
# - docker_image_repo: "docker.io"
# - quay_image_repo: "quay.io"
FIRST_PART=$(echo ${image} | awk -F"/" '{print $1}')
if [ "${FIRST_PART}" = "registry.k8s.io" ] ||
if [ "${FIRST_PART}" = "k8s.gcr.io" ] ||
[ "${FIRST_PART}" = "gcr.io" ] ||
[ "${FIRST_PART}" = "docker.io" ] ||
[ "${FIRST_PART}" = "quay.io" ] ||


@ -1,44 +0,0 @@
#!/bin/bash
CURRENT_DIR=$( dirname "$(readlink -f "$0")" )
OFFLINE_FILES_DIR_NAME="offline-files"
OFFLINE_FILES_DIR="${CURRENT_DIR}/${OFFLINE_FILES_DIR_NAME}"
OFFLINE_FILES_ARCHIVE="${CURRENT_DIR}/offline-files.tar.gz"
FILES_LIST=${FILES_LIST:-"${CURRENT_DIR}/temp/files.list"}
NGINX_PORT=8080
# download files
if [ ! -f "${FILES_LIST}" ]; then
echo "${FILES_LIST} should exist, run ./generate_list.sh first."
exit 1
fi
rm -rf "${OFFLINE_FILES_DIR}"
rm "${OFFLINE_FILES_ARCHIVE}"
mkdir "${OFFLINE_FILES_DIR}"
wget -x -P "${OFFLINE_FILES_DIR}" -i "${FILES_LIST}"
tar -czvf "${OFFLINE_FILES_ARCHIVE}" "${OFFLINE_FILES_DIR_NAME}"
[ -n "$NO_HTTP_SERVER" ] && echo "skip to run nginx" && exit 0
# run nginx container server
if command -v nerdctl 1>/dev/null 2>&1; then
runtime="nerdctl"
elif command -v podman 1>/dev/null 2>&1; then
runtime="podman"
elif command -v docker 1>/dev/null 2>&1; then
runtime="docker"
else
echo "No supported container runtime found"
exit 1
fi
sudo "${runtime}" container inspect nginx >/dev/null 2>&1
if [ $? -ne 0 ]; then
sudo "${runtime}" run \
--restart=always -d -p ${NGINX_PORT}:80 \
--volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \
--volume "$(pwd)"/nginx.conf:/etc/nginx/nginx.conf \
--name nginx nginx:alpine
fi


@ -1,39 +0,0 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
default_type application/octet-stream;
include /etc/nginx/conf.d/*.conf;
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
include /etc/nginx/default.d/*.conf;
location / {
root /usr/share/nginx/html/download;
autoindex on;
autoindex_exact_size off;
autoindex_localtime on;
}
error_page 404 /404.html;
location = /40x.html {
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
}
}
}


@ -36,7 +36,8 @@ terraform apply -var-file=credentials.tfvars
```
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated `ssh-bastion.conf`. Ansible automatically detects bastion and changes `ssh_args`
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated ssh-bastion.conf.
Ansible automatically detects bastion and changes ssh_args
```commandline
ssh -F ./ssh-bastion.conf user@$ip


@ -31,7 +31,9 @@ The setup looks like following
## Requirements
* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)
* Terraform 0.13.0 or newer
*0.12 also works if you modify the provider block to include version and remove all `versions.tf` files*
## Quickstart


@ -3,8 +3,8 @@ provider "exoscale" {}
module "kubernetes" {
source = "./modules/kubernetes-cluster"
prefix = var.prefix
zone = var.zone
prefix = var.prefix
machines = var.machines
ssh_public_keys = var.ssh_public_keys


@ -2,18 +2,18 @@
${connection_strings_master}
${connection_strings_worker}
[kube_control_plane]
[kube-master]
${list_master}
[etcd]
${list_master}
[kube_node]
[kube-node]
${list_worker}
[k8s_cluster:children]
[k8s-cluster:children]
kube-master
kube-node
[k8s_cluster:vars]
[k8s-cluster:vars]
network_id=${network_id}


@ -270,7 +270,6 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
|`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
|`bastion_allowed_ports` | List of ports to open on bastion node, `[]` by default |
|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
@ -295,8 +294,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
Allows a custom definition of worker nodes giving the operator full control over individual node flavor and
availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and
`number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration
using the `k8s_nodes` variable. The `az`, `flavor` and `floating_ip` parameters are mandatory.
The optional parameter `extra_groups` (a comma-delimited string) can be used to define extra inventory group memberships for specific nodes.
using the `k8s_nodes` variable.
For example:
@ -316,7 +314,6 @@ k8s_nodes = {
"az" = "sto3"
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
"floating_ip" = true
"extra_groups" = "calico_rr"
}
}
```


@ -84,7 +84,6 @@ module "compute" {
supplementary_node_groups = var.supplementary_node_groups
master_allowed_ports = var.master_allowed_ports
worker_allowed_ports = var.worker_allowed_ports
bastion_allowed_ports = var.bastion_allowed_ports
use_access_ip = var.use_access_ip
master_server_group_policy = var.master_server_group_policy
node_server_group_policy = var.node_server_group_policy
@ -97,11 +96,6 @@ module "compute" {
network_router_id = module.network.router_id
network_id = module.network.network_id
use_existing_network = var.use_existing_network
private_subnet_id = module.network.subnet_id
depends_on = [
module.network.subnet_id
]
}
output "private_subnet_id" {
@ -117,7 +111,7 @@ output "router_id" {
}
output "k8s_master_fips" {
value = var.number_of_k8s_masters + var.number_of_k8s_masters_no_etcd > 0 ? concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips) : [for key, value in module.ips.k8s_masters_fips : value.address]
value = concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips)
}
output "k8s_node_fips" {


@ -15,11 +15,8 @@ data "openstack_images_image_v2" "image_master" {
name = var.image_master == "" ? var.image : var.image_master
}
data "cloudinit_config" "cloudinit" {
part {
content_type = "text/cloud-config"
content = file("${path.module}/templates/cloudinit.yaml")
}
data "template_file" "cloudinit" {
template = file("${path.module}/templates/cloudinit.yaml")
}
data "openstack_networking_network_v2" "k8s_network" {
@ -85,17 +82,6 @@ resource "openstack_networking_secgroup_rule_v2" "bastion" {
security_group_id = openstack_networking_secgroup_v2.bastion[0].id
}
resource "openstack_networking_secgroup_rule_v2" "k8s_bastion_ports" {
count = length(var.bastion_allowed_ports)
direction = "ingress"
ethertype = "IPv4"
protocol = lookup(var.bastion_allowed_ports[count.index], "protocol", "tcp")
port_range_min = lookup(var.bastion_allowed_ports[count.index], "port_range_min")
port_range_max = lookup(var.bastion_allowed_ports[count.index], "port_range_max")
remote_ip_prefix = lookup(var.bastion_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")
security_group_id = openstack_networking_secgroup_v2.bastion[0].id
}
resource "openstack_networking_secgroup_v2" "k8s" {
name = "${var.cluster_name}-k8s"
description = "${var.cluster_name} - Kubernetes"
@ -209,9 +195,6 @@ resource "openstack_networking_port_v2" "bastion_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [
var.network_router_id
@ -224,7 +207,7 @@ resource "openstack_compute_instance_v2" "bastion" {
image_id = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
flavor_id = var.flavor_bastion
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered
user_data = data.template_file.cloudinit.rendered
dynamic "block_device" {
for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
@ -262,9 +245,6 @@ resource "openstack_networking_port_v2" "k8s_master_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [
var.network_router_id
@ -278,7 +258,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
flavor_id = var.flavor_k8s_master
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered
user_data = data.template_file.cloudinit.rendered
dynamic "block_device" {
@ -325,9 +305,6 @@ resource "openstack_networking_port_v2" "k8s_masters_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [
var.network_router_id
@ -386,9 +363,6 @@ resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [
var.network_router_id
@ -402,7 +376,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
flavor_id = var.flavor_k8s_master
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered
user_data = data.template_file.cloudinit.rendered
dynamic "block_device" {
@ -449,9 +423,6 @@ resource "openstack_networking_port_v2" "etcd_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [
var.network_router_id
@ -465,7 +436,7 @@ resource "openstack_compute_instance_v2" "etcd" {
image_id = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
flavor_id = var.flavor_etcd
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered
user_data = data.template_file.cloudinit.rendered
dynamic "block_device" {
for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
@ -506,9 +477,6 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [
var.network_router_id
@ -563,9 +531,6 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port"
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [
var.network_router_id
@ -579,7 +544,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
flavor_id = var.flavor_k8s_master
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered
user_data = data.template_file.cloudinit.rendered
dynamic "block_device" {
for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
@ -621,9 +586,6 @@ resource "openstack_networking_port_v2" "k8s_node_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [
var.network_router_id
@ -637,7 +599,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
flavor_id = var.flavor_k8s_node
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered
user_data = data.template_file.cloudinit.rendered
dynamic "block_device" {
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
@ -684,9 +646,6 @@ resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [
var.network_router_id
@ -700,7 +659,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
flavor_id = var.flavor_k8s_node
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered
user_data = data.template_file.cloudinit.rendered
dynamic "block_device" {
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
@ -742,9 +701,6 @@ resource "openstack_networking_port_v2" "k8s_nodes_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [
var.network_router_id
@ -758,7 +714,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
flavor_id = each.value.flavor
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered
user_data = data.template_file.cloudinit.rendered
dynamic "block_device" {
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
@ -786,7 +742,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
metadata = {
ssh_user = var.ssh_user
kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups},${try(each.value.extra_groups, "")}"
kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
depends_on = var.network_router_id
use_access_ip = var.use_access_ip
}
@ -804,9 +760,6 @@ resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" {
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
fixed_ip {
subnet_id = var.private_subnet_id
}
depends_on = [
var.network_router_id

View file

@ -136,10 +136,6 @@ variable "worker_allowed_ports" {
type = list
}
variable "bastion_allowed_ports" {
type = list
}
variable "use_access_ip" {}
variable "master_server_group_policy" {
@ -189,7 +185,3 @@ variable "port_security_enabled" {
variable "force_null_port_security" {
type = bool
}
variable "private_subnet_id" {
type = string
}


@ -257,12 +257,6 @@ variable "worker_allowed_ports" {
]
}
variable "bastion_allowed_ports" {
type = list(any)
default = []
}
variable "use_access_ip" {
default = 1
}


@ -212,7 +212,7 @@ def metal_device(resource, tfvars=None):
'project_id': raw_attrs['project_id'],
'state': raw_attrs['state'],
# ansible
'ansible_host': raw_attrs['network.0.address'],
'ansible_ssh_host': raw_attrs['network.0.address'],
'ansible_ssh_user': 'root', # Use root by default in metal
# generic
'ipv4_address': raw_attrs['network.0.address'],
@ -292,16 +292,16 @@ def openstack_host(resource, module_name):
try:
if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1":
attrs.update({
'ansible_host': re.sub("[\[\]]", "", raw_attrs['access_ip_v6']),
'ansible_ssh_host': re.sub("[\[\]]", "", raw_attrs['access_ip_v6']),
'publicly_routable': True,
})
else:
attrs.update({
'ansible_host': raw_attrs['access_ip_v4'],
'ansible_ssh_host': raw_attrs['access_ip_v4'],
'publicly_routable': True,
})
except (KeyError, ValueError):
attrs.update({'ansible_host': '', 'publicly_routable': False})
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
# Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017
@ -349,7 +349,7 @@ def iter_host_ips(hosts, ips):
'access_ip_v4': ip,
'access_ip': ip,
'public_ipv4': ip,
'ansible_host': ip,
'ansible_ssh_host': ip,
})
if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0":
@ -389,7 +389,7 @@ def query_list(hosts):
def query_hostfile(hosts):
out = ['## begin hosts generated by terraform.py ##']
out.extend(
'{}\t{}'.format(attrs['ansible_host'].ljust(16), name)
'{}\t{}'.format(attrs['ansible_ssh_host'].ljust(16), name)
for name, attrs, _ in hosts
)


@ -112,26 +112,12 @@ terraform destroy --var-file cluster-settings.tfvars \
* `size`: The size of the additional disk in GB
* `tier`: The tier of disk to use (`maxiops` is the only one you can choose atm)
* `firewall_enabled`: Enable firewall rules
* `firewall_default_deny_in`: Set the firewall to deny inbound traffic by default. Automatically adds UpCloud DNS server and NTP port allowlisting.
* `firewall_default_deny_out`: Set the firewall to deny outbound traffic by default.
* `master_allowed_remote_ips`: List of IP ranges that should be allowed to access API of masters
* `start_address`: Start of address range to allow
* `end_address`: End of address range to allow
* `k8s_allowed_remote_ips`: List of IP ranges that should be allowed SSH access to all nodes
* `start_address`: Start of address range to allow
* `end_address`: End of address range to allow
* `master_allowed_ports`: List of port ranges that should be allowed to access the masters
* `protocol`: Protocol *(tcp|udp|icmp)*
* `port_range_min`: Start of port range to allow
* `port_range_max`: End of port range to allow
* `start_address`: Start of address range to allow
* `end_address`: End of address range to allow
* `worker_allowed_ports`: List of port ranges that should be allowed to access the workers
* `protocol`: Protocol *(tcp|udp|icmp)*
* `port_range_min`: Start of port range to allow
* `port_range_max`: End of port range to allow
* `start_address`: Start of address range to allow
* `end_address`: End of address range to allow
* `loadbalancer_enabled`: Enable managed load balancer
* `loadbalancer_plan`: Plan to use for load balancer *(development|production-small)*
* `loadbalancers`: Ports to load balance and which machines to forward to. Key of this object will be used as the name of the load balancer frontends/backends


@ -95,9 +95,7 @@ machines = {
}
}
firewall_enabled = false
firewall_default_deny_in = false
firewall_default_deny_out = false
firewall_enabled = false
master_allowed_remote_ips = [
{
@ -113,9 +111,6 @@ k8s_allowed_remote_ips = [
}
]
master_allowed_ports = []
worker_allowed_ports = []
loadbalancer_enabled = false
loadbalancer_plan = "development"
loadbalancers = {


@ -24,12 +24,8 @@ module "kubernetes" {
ssh_public_keys = var.ssh_public_keys
firewall_enabled = var.firewall_enabled
firewall_default_deny_in = var.firewall_default_deny_in
firewall_default_deny_out = var.firewall_default_deny_out
master_allowed_remote_ips = var.master_allowed_remote_ips
k8s_allowed_remote_ips = var.k8s_allowed_remote_ips
master_allowed_ports = var.master_allowed_ports
worker_allowed_ports = var.worker_allowed_ports
loadbalancer_enabled = var.loadbalancer_enabled
loadbalancer_plan = var.loadbalancer_plan


@ -228,126 +228,6 @@ resource "upcloud_firewall_rules" "master" {
source_address_start = "0.0.0.0"
}
}
dynamic firewall_rule {
for_each = var.master_allowed_ports
content {
action = "accept"
comment = "Allow access on this port"
destination_port_end = firewall_rule.value.port_range_max
destination_port_start = firewall_rule.value.port_range_min
direction = "in"
family = "IPv4"
protocol = firewall_rule.value.protocol
source_address_end = firewall_rule.value.end_address
source_address_start = firewall_rule.value.start_address
}
}
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
content {
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv4"
protocol = firewall_rule.value
source_address_end = "94.237.40.9"
source_address_start = "94.237.40.9"
}
}
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
content {
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv4"
protocol = firewall_rule.value
source_address_end = "94.237.127.9"
source_address_start = "94.237.127.9"
}
}
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
content {
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv6"
protocol = firewall_rule.value
source_address_end = "2a04:3540:53::1"
source_address_start = "2a04:3540:53::1"
}
}
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
content {
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv6"
protocol = firewall_rule.value
source_address_end = "2a04:3544:53::1"
source_address_start = "2a04:3544:53::1"
}
}
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["udp"] : []
content {
action = "accept"
comment = "NTP Port"
source_port_end = "123"
source_port_start = "123"
direction = "in"
family = "IPv4"
protocol = firewall_rule.value
source_address_end = "255.255.255.255"
source_address_start = "0.0.0.0"
}
}
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["udp"] : []
content {
action = "accept"
comment = "NTP Port"
source_port_end = "123"
source_port_start = "123"
direction = "in"
family = "IPv6"
protocol = firewall_rule.value
}
}
firewall_rule {
action = var.firewall_default_deny_in ? "drop" : "accept"
direction = "in"
}
firewall_rule {
action = var.firewall_default_deny_out ? "drop" : "accept"
direction = "out"
}
}
resource "upcloud_firewall_rules" "k8s" {
@ -385,126 +265,6 @@ resource "upcloud_firewall_rules" "k8s" {
source_address_start = "0.0.0.0"
}
}
dynamic firewall_rule {
for_each = var.worker_allowed_ports
content {
action = "accept"
comment = "Allow access on this port"
destination_port_end = firewall_rule.value.port_range_max
destination_port_start = firewall_rule.value.port_range_min
direction = "in"
family = "IPv4"
protocol = firewall_rule.value.protocol
source_address_end = firewall_rule.value.end_address
source_address_start = firewall_rule.value.start_address
}
}
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
content {
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv4"
protocol = firewall_rule.value
source_address_end = "94.237.40.9"
source_address_start = "94.237.40.9"
}
}
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
content {
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv4"
protocol = firewall_rule.value
source_address_end = "94.237.127.9"
source_address_start = "94.237.127.9"
}
}
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
content {
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv6"
protocol = firewall_rule.value
source_address_end = "2a04:3540:53::1"
source_address_start = "2a04:3540:53::1"
}
}
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
content {
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv6"
protocol = firewall_rule.value
source_address_end = "2a04:3544:53::1"
source_address_start = "2a04:3544:53::1"
}
}
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["udp"] : []
content {
action = "accept"
comment = "NTP Port"
source_port_end = "123"
source_port_start = "123"
direction = "in"
family = "IPv4"
protocol = firewall_rule.value
source_address_end = "255.255.255.255"
source_address_start = "0.0.0.0"
}
}
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["udp"] : []
content {
action = "accept"
comment = "NTP Port"
source_port_end = "123"
source_port_start = "123"
direction = "in"
family = "IPv6"
protocol = firewall_rule.value
}
}
firewall_rule {
action = var.firewall_default_deny_in ? "drop" : "accept"
direction = "in"
}
firewall_rule {
action = var.firewall_default_deny_out ? "drop" : "accept"
direction = "out"
}
}
resource "upcloud_loadbalancer" "lb" {

View file

@ -49,34 +49,6 @@ variable "k8s_allowed_remote_ips" {
}))
}
variable "master_allowed_ports" {
type = list(object({
protocol = string
port_range_min = number
port_range_max = number
start_address = string
end_address = string
}))
}
variable "worker_allowed_ports" {
type = list(object({
protocol = string
port_range_min = number
port_range_max = number
start_address = string
end_address = string
}))
}
variable "firewall_default_deny_in" {
type = bool
}
variable "firewall_default_deny_out" {
type = bool
}
variable "loadbalancer_enabled" {
type = bool
}

View file

@ -3,7 +3,7 @@ terraform {
required_providers {
upcloud = {
source = "UpCloudLtd/upcloud"
version = "~>2.5.0"
version = "~>2.4.0"
}
}
required_version = ">= 0.13"

View file

@ -95,10 +95,7 @@ machines = {
}
}
firewall_enabled = false
firewall_default_deny_in = false
firewall_default_deny_out = false
firewall_enabled = false
master_allowed_remote_ips = [
{
@ -114,9 +111,6 @@ k8s_allowed_remote_ips = [
}
]
master_allowed_ports = []
worker_allowed_ports = []
loadbalancer_enabled = false
loadbalancer_plan = "development"
loadbalancers = {

View file

@ -79,38 +79,6 @@ variable "k8s_allowed_remote_ips" {
default = []
}
variable "master_allowed_ports" {
description = "List of ports to allow on masters"
type = list(object({
protocol = string
port_range_min = number
port_range_max = number
start_address = string
end_address = string
}))
}
variable "worker_allowed_ports" {
description = "List of ports to allow on workers"
type = list(object({
protocol = string
port_range_min = number
port_range_max = number
start_address = string
end_address = string
}))
}
variable "firewall_default_deny_in" {
description = "Add firewall policies that deny all inbound traffic by default"
default = false
}
variable "firewall_default_deny_out" {
description = "Add firewall policies that deny all outbound traffic by default"
default = false
}
variable "loadbalancer_enabled" {
description = "Enable load balancer"
default = false

View file

@ -3,7 +3,7 @@ terraform {
required_providers {
upcloud = {
source = "UpCloudLtd/upcloud"
version = "~>2.5.0"
version = "~>2.4.0"
}
}
required_version = ">= 0.13"

View file

@ -35,7 +35,9 @@ This setup assumes that the DHCP is disabled in the vSphere cluster and IP addre
## Requirements
* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)
* Terraform 0.13.0 or newer
*0.12 also works if you modify the provider block to include version and remove all `versions.tf` files*
## Quickstart

View file

@ -1,13 +0,0 @@
#!/bin/bash
ansible-playbook -i inventory/${MY_INVENTORY}/inventory.ini --become --user=${MY_SSH_USER} --become-user=root cluster.yml -e etcd_retries=42
export KUBECONFIG=$(pwd)/inventory/c12s-sample/artifacts/admin.conf
echo
echo "execute the following in any shell where you want to connect to your cluster with kubectl : "
echo "export KUBECONFIG=$(pwd)/inventory/c12s-sample/artifacts/admin.conf"
kubectl create namespace infra
echo
echo ArgoCD admin password :
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d; echo

View file

@ -35,10 +35,7 @@
* [OpenSUSE](docs/opensuse.md)
* [RedHat Enterprise Linux](docs/rhel.md)
* [CentOS/OracleLinux/AlmaLinux/Rocky Linux](docs/centos.md)
* [Kylin Linux Advanced Server V10](docs/kylinlinux.md)
* [Amazon Linux 2](docs/amazonlinux.md)
* [UOS Linux](docs/uoslinux.md)
* [openEuler notes](docs/openeuler.md))
* CRI
* [Containerd](docs/containerd.md)
* [Docker](docs/docker.md)
@ -53,7 +50,6 @@
* [DNS Stack](docs/dns-stack.md)
* [Kubernetes reliability](docs/kubernetes-reliability.md)
* [Local Registry](docs/kubernetes-apps/registry.md)
* [NTP](docs/ntp.md)
* External Storage Provisioners
* [RBD Provisioner](docs/kubernetes-apps/rbd_provisioner.md)
* [CEPHFS Provisioner](docs/kubernetes-apps/cephfs_provisioner.md)

View file

@ -3,7 +3,7 @@
## Installing Ansible
Kubespray supports multiple ansible versions and ships different `requirements.txt` files for them.
Depending on your available python version you may be limited in choosing which ansible version to use.
Depending on your available python version you may be limited in chooding which ansible version to use.
It is recommended to deploy the ansible version used by kubespray into a python virtual environment.
@ -13,7 +13,7 @@ KUBESPRAYDIR=kubespray
ANSIBLE_VERSION=2.12
virtualenv --python=$(which python3) $VENVDIR
source $VENVDIR/bin/activate
cd $KUBESPRAYDIR
cd $KUESPRAYDIR
pip install -U -r requirements-$ANSIBLE_VERSION.txt
test -f requirements-$ANSIBLE_VERSION.yml && \
ansible-galaxy role install -r requirements-$ANSIBLE_VERSION.yml && \
@ -26,6 +26,8 @@ Based on the table below and the available python version for your ansible host
| Ansible Version | Python Version |
| --------------- | -------------- |
| 2.9 | 2.7,3.5-3.8 |
| 2.10 | 2.7,3.5-3.8 |
| 2.11 | 2.7,3.5-3.9 |
| 2.12 | 3.8-3.10 |
@ -281,7 +283,7 @@ For more information about Ansible and bastion hosts, read
## Mitogen
Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for usage and reasons for deprecation.
Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for useage and reasons for deprecation.
## Beyond ansible 2.9

View file

@ -27,7 +27,7 @@ Check the associated storage class (if you enabled persistent_volumes):
```ShellSession
$ kubectl get storageclass
NAME PROVISIONER AGE
ebs-sc ebs.csi.aws.com 45s
ebs-sc ebs.csi.aws.com 45s
```
You can run a PVC and an example Pod using this file `ebs-pod.yml`:
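The manifest itself is not shown in this diff; a minimal sketch of what `ebs-pod.yml` might contain, reusing the `ebs-sc` class and `ebs-claim` name that appear in the output below (the Pod name and image are placeholders):
```yaml
# Hypothetical ebs-pod.yml: a PVC on the ebs-sc class plus a Pod that mounts it
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ebs-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ebs-sc
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: app
spec:
  containers:
    - name: app
      image: busybox
      command: ["sh", "-c", "sleep 3600"]
      volumeMounts:
        - name: ebs-volume
          mountPath: /data
  volumes:
    - name: ebs-volume
      persistentVolumeClaim:
        claimName: ebs-claim
```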
@ -71,8 +71,8 @@ You should see the PVC provisioned and bound:
```ShellSession
$ kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
ebs-claim Bound pvc-0034cb9e-1ddd-4b3f-bb9e-0b5edbf5194c 1Gi RWO ebs-sc 50s
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
ebs-claim Bound pvc-0034cb9e-1ddd-4b3f-bb9e-0b5edbf5194c 1Gi RWO ebs-sc 50s
```
And the volume mounted to the example Pod (wait until the Pod is Running):

View file

@ -57,28 +57,19 @@ The name of the network security group your instances are in, can be retrieved v
These will have to be generated first:
- Create an Azure AD Application with:
```ShellSession
az ad app create --display-name kubespray --identifier-uris http://kubespray --homepage http://kubespray.com --password CLIENT_SECRET
```
`az ad app create --display-name kubespray --identifier-uris http://kubespray --homepage http://kubespray.com --password CLIENT_SECRET`
Display name, identifier-uri, homepage and the password can be chosen
Note the AppId in the output.
- Create Service principal for the application with:
```ShellSession
az ad sp create --id AppId
```
`az ad sp create --id AppId`
This is the AppId from the last command
- Create the role assignment with:
```ShellSession
az role assignment create --role "Owner" --assignee http://kubespray --subscription SUBSCRIPTION_ID
```
`az role assignment create --role "Owner" --assignee http://kubespray --subscription SUBSCRIPTION_ID`
azure\_csi\_aad\_client\_id must be set to the AppId, azure\_csi\_aad\_client\_secret is your chosen secret.
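In Kubespray inventory terms this ends up as something like the following (both values are placeholders for the AppId and secret produced by the commands above):
```yaml
azure_csi_aad_client_id: "<AppId from the az ad app create output>"
azure_csi_aad_client_secret: "<CLIENT_SECRET you chose>"
```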

View file

@ -71,27 +71,14 @@ The name of the resource group that contains the route table. Defaults to `azur
These will have to be generated first:
- Create an Azure AD Application with:
```ShellSession
az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET
```
`az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET`
display name, identifier-uri, homepage and the password can be chosen
Note the AppId in the output.
- Create Service principal for the application with:
```ShellSession
az ad sp create --id AppId
```
`az ad sp create --id AppId`
This is the AppId from the last command
- Create the role assignment with:
```ShellSession
az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID
```
`az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID`
azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
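For example, with placeholder values:
```yaml
azure_aad_client_id: "<AppId>"
azure_aad_client_secret: "<CLIENT_SECRET>"
```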

View file

@ -48,13 +48,11 @@ The `kubespray-defaults` role is expected to be run before this role.
Remember to disable fact gathering since Python might not be present on hosts.
```yaml
- hosts: all
gather_facts: false # not all hosts might be able to run modules yet
roles:
- kubespray-defaults
- bootstrap-os
```
- hosts: all
gather_facts: false # not all hosts might be able to run modules yet
roles:
- kubespray-defaults
- bootstrap-os
## License

View file

@ -72,14 +72,9 @@ calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
In some cases you may want to route the pods subnet and so NAT is not needed on the nodes.
For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located.
The following variables need to be set as follow:
```yml
peer_with_router: true # enable the peering with the datacenter's border router (default value: false).
nat_outgoing: false # (optional) NAT outgoing (default value: true).
```
And you'll need to edit the inventory and add a hostvar `local_as` by node.
The following variables need to be set:
`peer_with_router` to enable the peering with the datacenter's border router (default value: false).
you'll need to edit the inventory and add a hostvar `local_as` by node.
```ShellSession
node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
@ -129,7 +124,8 @@ You need to edit your inventory and add:
* `calico_rr` group with nodes in it. `calico_rr` can be combined with
`kube_node` and/or `kube_control_plane`. `calico_rr` group also must be a child
group of `k8s_cluster` group.
* `cluster_id` by route reflector node/group (see details [here](https://hub.docker.com/r/calico/routereflector/))
* `cluster_id` by route reflector node/group (see details
[here](https://hub.docker.com/r/calico/routereflector/))
Here's an example of Kubespray inventory with standalone route reflectors:
@ -176,8 +172,6 @@ node5
[rack0:vars]
cluster_id="1.0.0.1"
calico_rr_id=rr1
calico_group_id=rr1
```
The inventory above will deploy the following topology assuming that calico's
@ -205,14 +199,6 @@ To re-define health host please set the following variable in your inventory:
calico_healthhost: "0.0.0.0"
```
### Optional : Configure VXLAN hardware Offload
Because of the issue [projectcalico/calico#4727](https://github.com/projectcalico/calico/issues/4727), the VXLAN offload is disabled by default. It can be configured like this:
```yml
calico_feature_detect_override: "ChecksumOffloadBroken=true" # VXLAN offload will be enabled when the kernel version is > 5.7 (it may cause problems with buggy NIC drivers)
```
### Optional : Configure Calico Node probe timeouts
Under certain conditions a deployer may need to tune the Calico liveness and readiness probes timeout settings. These can be configured like this:
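The example itself is cut off by the diff; a sketch, assuming the readiness/liveness timeout variables exposed by the Calico role (the readiness one is visible in the hunk header below):
```yaml
calico_node_livenessprobe_timeout: 10
calico_node_readinessprobe_timeout: 10
```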
@ -226,7 +212,7 @@ calico_node_readinessprobe_timeout: 10
Calico supports two types of encapsulation: [VXLAN and IP in IP](https://docs.projectcalico.org/v3.11/networking/vxlan-ipip). VXLAN is the more mature implementation and enabled by default, please check your environment if you need *IP in IP* encapsulation.
*IP in IP* and *VXLAN* is mutually exclusive modes.
*IP in IP* and *VXLAN* is mutualy exclusive modes.
Kubespray defaults have changed after version 2.18 from auto-enabling `ipip` mode to auto-enabling `vxlan`. This was done to facilitate wider deployment scenarios including those where vxlan acceleration is provided by the underlying network devices.
@ -259,14 +245,14 @@ calico_network_backend: 'bird'
If you would like to migrate from the old IP in IP with `bird` network backends default to the new VXLAN based encapsulation you need to perform this change before running an upgrade of your cluster; the `cluster.yml` and `upgrade-cluster.yml` playbooks will refuse to continue if they detect incompatible settings.
Execute the following steps on one of the control plane nodes, ensure the cluster in healthy before proceeding.
Execute the following sters on one of the control plane nodes, ensure the cluster in healthy before proceeding.
```shell
calicoctl.sh patch felixconfig default -p '{"spec":{"vxlanEnabled":true}}'
calicoctl.sh patch ippool default-pool -p '{"spec":{"ipipMode":"Never", "vxlanMode":"Always"}}'
```
**Note:** if you created multiple ippools you will need to patch all of them individually to change their encapsulation. The kubespray playbooks only handle the default ippool created by kubespray.
**Note:** if you created multiple ippools you will need to patch all of them individually to change their encapsulation. The kubespray playbooks only handle the default ippool creaded by kubespray.
Wait for the `vxlan.calico` interfaces to be created on all cluster nodes and traffic to be routed through it then you can disable `ipip`.
@ -329,13 +315,6 @@ calico_ipam_host_local: true
Refer to Project Calico section [Using host-local IPAM](https://docs.projectcalico.org/reference/cni-plugin/configuration#using-host-local-ipam) for further information.
### Optional : Disable CNI logging to disk
Calico CNI plugin logs to /var/log/calico/cni/cni.log and to stderr.
stderr of CNI plugins can be found in the logs of container runtime.
You can disable Calico CNI logging to disk by setting `calico_cni_log_file_path: false`.
## eBPF Support
Calico supports eBPF for its data plane see [an introduction to the Calico eBPF Dataplane](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/) for further information.
@ -383,7 +362,7 @@ use_localhost_as_kubeapi_loadbalancer: true
### Tunneled versus Direct Server Return
By default Calico uses Tunneled service mode but it can use direct server return (DSR) in order to optimize the return path for a service.
By default Calico usese Tunneled service mode but it can use direct server return (DSR) in order to optimize the return path for a service.
To configure DSR:
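The snippet is elided in this view; assuming the role's `calico_bpf_service_mode` variable, it would be roughly:
```yaml
calico_bpf_service_mode: "DSR"
```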
@ -409,7 +388,7 @@ Please see [Calico eBPF troubleshooting guide](https://docs.projectcalico.org/ma
## Wireguard Encryption
Calico supports using Wireguard for encryption. Please see the docs on [encrypt cluster pod traffic](https://docs.projectcalico.org/security/encrypt-cluster-pod-traffic).
Calico supports using Wireguard for encryption. Please see the docs on [encryptiong cluster pod traffic](https://docs.projectcalico.org/security/encrypt-cluster-pod-traffic).
To enable wireguard support:
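The example block is truncated here; assuming the `calico_wireguard_enabled` toggle, it amounts to:
```yaml
calico_wireguard_enabled: true
```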

View file

@ -2,14 +2,14 @@
## CentOS 7
The maximum python version officially supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above.
The maximum python version offically supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above.
Kubespray supports multiple ansible versions but only the default (5.x) gets wide testing coverage. If your deployment host is CentOS 7 it is recommended to use one of the earlier versions still supported.
## CentOS 8
CentOS 8 / Oracle Linux 8,9 / AlmaLinux 8,9 / Rocky Linux 8,9 ship only with iptables-nft (ie without iptables-legacy similar to RHEL8)
CentOS 8 / Oracle Linux 8 / AlmaLinux 8 / Rocky Linux 8 ship only with iptables-nft (ie without iptables-legacy similar to RHEL8)
The only tested configuration for now is using Calico CNI
You need to add `calico_iptables_backend: "NFT"` to your configuration.
You need to add `calico_iptables_backend: "NFT"` or `calico_iptables_backend: "Auto"` to your configuration.
If you have containers that are using iptables in the host network namespace (`hostNetwork=true`),
you need to ensure they are using iptables-nft.

View file

@ -8,19 +8,17 @@ To generate this Matrix run `./tests/scripts/md-table/main.py`
|---| --- | --- | --- | --- | --- | --- | --- | --- |
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
centos7 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
debian10 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
fedora34 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
## crio
@ -32,15 +30,13 @@ centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora34 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu22 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
## docker
@ -48,16 +44,14 @@ ubuntu22 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|---| --- | --- | --- | --- | --- | --- | --- | --- |
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora35 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
fedora34 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

View file

@ -56,7 +56,7 @@ cilium_operator_extra_volume_mounts:
## Choose Cilium version
```yml
cilium_version: v1.12.1
cilium_version: v1.11.3
```
## Add variable to config
@ -121,23 +121,6 @@ cilium_encryption_type: "wireguard"
Kubespray currently supports Linux distributions with Wireguard Kernel mode on Linux 5.6 and newer.
## Bandwidth Manager
Ciliums bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.
Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies.
In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods.
Bandwidth Manager requires a v5.1.x or more recent Linux kernel.
For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/v1.12/gettingstarted/bandwidth-manager/)
To use this function, set the following parameters
```yml
cilium_enable_bandwidth_manager: true
```
## Install Cilium Hubble
k8s-net-cilium.yml:
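The enabling snippet is truncated in this view; a sketch, assuming the usual Hubble toggles exposed by the Cilium role:
```yaml
cilium_enable_hubble: true
cilium_hubble_install: true
cilium_hubble_tls_generate: true
```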
@ -170,32 +153,3 @@ cilium_hubble_metrics:
```
[More](https://docs.cilium.io/en/v1.9/operations/metrics/#hubble-exported-metrics)
## Upgrade considerations
### Rolling-restart timeouts
Cilium relies on the kernel's BPF support, which is extremely fast at runtime but incurs a compilation penalty on initialization and update.
As a result, the Cilium DaemonSet pods can take a significant time to start, which scales with the number of nodes and endpoints in your cluster.
As part of cluster.yml, this DaemonSet is restarted, and Kubespray's [default timeouts for this operation](../roles/network_plugin/cilium/defaults/main.yml)
are not appropriate for large clusters.
This means that you will likely want to update these timeouts to a value more in-line with your cluster's number of nodes and their respective CPU performance.
This is configured by the following values:
```yaml
# Configure how long to wait for the Cilium DaemonSet to be ready again
cilium_rolling_restart_wait_retries_count: 30
cilium_rolling_restart_wait_retries_delay_seconds: 10
```
The total time allowed (count * delay) should be at least `($number_of_nodes_in_cluster * $cilium_pod_start_time)` for successful rolling updates. There are no
drawbacks to making it higher and giving yourself a time buffer to accommodate transient slowdowns.
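As a hedged worked example (the numbers are illustrative, not measured): with 60 nodes and roughly 45 seconds for a Cilium pod to become ready, the budget must exceed 60 * 45 = 2700 seconds, which the following settings cover with some margin:
```yaml
# 300 retries * 10 s delay = 3000 s total, comfortably above the 2700 s estimate
cilium_rolling_restart_wait_retries_count: 300
cilium_rolling_restart_wait_retries_delay_seconds: 10
```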
Note: To find the `$cilium_pod_start_time` for your cluster, you can simply restart a Cilium pod on a node of your choice and look at how long it takes for it
to become ready.
Note 2: The default CPU requests/limits for Cilium pods is set to a very conservative 100m:500m which will likely yield very slow startup for Cilium pods. You
probably want to significantly increase the CPU limit specifically if short bursts of CPU from Cilium are acceptable to you.

View file

@ -39,68 +39,4 @@ containerd_registries:
image_command_tool: crictl
```
### Containerd Runtimes
Containerd supports multiple runtime configurations that can be used with
[RuntimeClass] Kubernetes feature. See [runtime classes in containerd] for the
details of containerd configuration.
In kubespray, the default runtime name is "runc", and it can be configured with the `containerd_runc_runtime` dictionary:
```yaml
containerd_runc_runtime:
name: runc
type: "io.containerd.runc.v2"
engine: ""
root: ""
options:
systemdCgroup: "false"
binaryName: /usr/local/bin/my-runc
base_runtime_spec: cri-base.json
```
Further runtimes can be configured with `containerd_additional_runtimes`, which
is a list of such dictionaries.
Default runtime can be changed by setting `containerd_default_runtime`.
#### Base runtime specs and limiting number of open files
`base_runtime_spec` key in a runtime dictionary is used to explicitly
specify a runtime spec json file. `runc` runtime has it set to `cri-base.json`,
which is generated with `ctr oci spec > /etc/containerd/cri-base.json` and
updated to include a custom setting for maximum number of file descriptors per
container.
You can change maximum number of file descriptors per container for the default
`runc` runtime by setting the `containerd_base_runtime_spec_rlimit_nofile`
variable.
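For instance (the limit shown is only an illustrative value, not a recommended default):
```yaml
containerd_base_runtime_spec_rlimit_nofile: 65536
```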
You can tune many more [settings][runtime-spec] by supplying your own file name and content with `containerd_base_runtime_specs`:
```yaml
containerd_base_runtime_specs:
cri-spec-custom.json: |
{
"ociVersion": "1.0.2-dev",
"process": {
"user": {
"uid": 0,
...
```
The files in this dict will be placed in containerd config directory,
`/etc/containerd` by default. The files can then be referenced by filename in a
runtime:
```yaml
containerd_runc_runtime:
name: runc
base_runtime_spec: cri-spec-custom.json
...
```
[containerd]: https://containerd.io/
[RuntimeClass]: https://kubernetes.io/docs/concepts/containers/runtime-class/
[runtime classes in containerd]: https://github.com/containerd/containerd/blob/main/docs/cri/config.md#runtime-classes
[runtime-spec]: https://github.com/opencontainers/runtime-spec

View file

@ -3,39 +3,34 @@
Debian Jessie installation Notes:
- Add
```ini
GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
```
to `/etc/default/grub`. Then update with
```GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"```
to /etc/default/grub. Then update with
```ShellSession
sudo update-grub
sudo update-grub2
sudo reboot
sudo update-grub
sudo update-grub2
sudo reboot
```
- Add the [backports](https://backports.debian.org/Instructions/) which contain Systemd 2.30 and update Systemd.
```ShellSession
apt-get -t jessie-backports install systemd
```
```apt-get -t jessie-backports install systemd```
(Necessary because the default Systemd version (2.15) does not support the "Delegate" directive in service files)
- Add the Ansible repository and install Ansible to get a proper version
```ShellSession
sudo add-apt-repository ppa:ansible/ansible
sudo apt-get update
sudo apt-get install ansible
```
- Install Jinja2 and Python-Netaddr
```ShellSession
sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr
```
```sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr```
Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)

View file

@ -19,14 +19,6 @@ ndots value to be used in ``/etc/resolv.conf``
It is important to note that multiple search domains combined with high ``ndots``
values lead to poor performance of DNS stack, so please choose it wisely.
## dns_timeout
timeout value to be used in ``/etc/resolv.conf``
## dns_attempts
attempts value to be used in ``/etc/resolv.conf``
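Both end up as `options` entries in the generated `/etc/resolv.conf`; an illustrative setting (the values are assumptions, not documented defaults):
```yaml
dns_timeout: 2
dns_attempts: 3
```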
### searchdomains
Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
@ -34,8 +26,6 @@ Custom search domains to be added in addition to the cluster search domains (``d
Most Linux systems limit the total number of search domains to 6 and the total length of all search domains
to 256 characters. Depending on the length of ``dns_domain``, you're limited to less than the total limit.
`remove_default_searchdomains: true` will remove the default cluster search domains.
Please note that ``resolvconf_mode: docker_dns`` will automatically add your systems search domains as
additional search domains. Please take this into the accounts for the limits.
@ -50,12 +40,6 @@ is not set, a default resolver is chosen (depending on cloud provider or 8.8.8.8
DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup
DNS servers in early cluster deployment when no cluster DNS is available yet.
### dns_upstream_forward_extra_opts
Whether or not upstream DNS servers come from `upstream_dns_servers` variable or /etc/resolv.conf, related forward block in coredns (and nodelocaldns) configuration can take options (see <https://coredns.io/plugins/forward/> for details).
These are configurable in inventory in as a dictionary in the `dns_upstream_forward_extra_opts` variable.
By default, no options other than the hardcoded ones are set (see `roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2` and `roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2`).
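A sketch of such a dictionary, assuming you want to pass the forward plugin's `max_concurrent` option:
```yaml
dns_upstream_forward_extra_opts:
  max_concurrent: 1000
```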
### coredns_external_zones
Array of optional external zones to coredns forward queries to. It's injected into
@ -78,13 +62,6 @@ coredns_external_zones:
nameservers:
- 192.168.0.53
cache: 0
- zones:
- mydomain.tld
nameservers:
- 10.233.0.3
cache: 5
rewrite:
- name stop website.tld website.namespace.svc.cluster.local
```
or as INI
@ -230,7 +207,7 @@ cluster service names.
Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames(cluster.local suffix by default).
More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/1024-nodelocal-cache-dns/README.md).
More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/0030-nodelocal-dns-cache.md).
**As per the 2.10 release, Nodelocal DNS cache is enabled by default.**
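The toggle and the link-local address the cache binds to are controlled by inventory variables roughly like the following (the address shown is the commonly used default and should be treated as an assumption):
```yaml
enable_nodelocaldns: true
nodelocaldns_ip: 169.254.25.10
```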
@ -286,8 +263,7 @@ nodelocaldns_secondary_skew_seconds: 5
* the ``searchdomains`` have a limitation of a 6 names and 256 chars
length. Due to default ``svc, default.svc`` subdomains, the actual
limits are a 4 names and 239 chars respectively. If `remove_default_searchdomains: true`
added you are back to 6 names.
limits are a 4 names and 239 chars respectively.
* the ``nameservers`` have a limitation of a 3 servers, although there
is a way to mitigate that with the ``upstream_dns_servers``,

View file

@ -8,7 +8,13 @@ Using the docker container manager:
container_manager: docker
```
*Note:* `cri-dockerd` has replaced `dockershim` across supported kubernetes version in kubespray 2.20.
Using `cri-dockerd` instead of `dockershim`:
```yaml
cri_dockerd_enabled: false
```
*Note:* The `cri_dockerd_enabled: true` setting will become the default in a future kubespray release once kubespray 1.24+ is supported and `dockershim` is removed. At that point, changing this option will be deprecated and silently ignored.
Enabling the `overlay2` graph driver:

View file

@ -54,7 +54,7 @@ Prepare ignition and serve via http (a.e. python -m http.server )
### create guest
```ShellSeasion
```shell script
machine_name=myfcos1
ignition_url=http://mywebserver/fcos.ign

View file

@ -2,8 +2,6 @@
Flannel is a network fabric for containers, designed for Kubernetes
Supported [backends](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#wireguard): `vxlan`, `host-gw` and `wireguard`
**Warning:** You may encounter this [bug](https://github.com/coreos/flannel/pull/1282) with `VXLAN` backend, while waiting on a newer Flannel version the current workaround (`ethtool --offload flannel.1 rx off tx off`) is showcase in kubespray [networking test](tests/testcases/040_check-network-adv.yml:31).
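Selecting a backend is done through the flannel role's variables; a sketch, assuming the `flannel_backend_type` variable:
```yaml
flannel_backend_type: "host-gw"
```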
## Verifying flannel install

View file

@ -2,19 +2,15 @@
Google Cloud Platform can be used for creation of Kubernetes Service Load Balancer.
This feature is able to deliver by adding parameters to `kube-controller-manager` and `kubelet`. You need specify:
This feature is able to deliver by adding parameters to kube-controller-manager and kubelet. You need specify:
```ShellSession
--cloud-provider=gce
--cloud-config=/etc/kubernetes/cloud-config
```
To get working it in kubespray, you need to add tag to GCE instances and specify it in kubespray group vars and also set `cloud_provider` to `gce`. So for example, in file `group_vars/all/gcp.yml`:
To get working it in kubespray, you need to add tag to GCE instances and specify it in kubespray group vars and also set cloud_provider to gce. So for example, in file group_vars/all/gcp.yml:
```yaml
cloud_provider: gce
gce_node_tags: k8s-lb
```
When you will setup it and create SVC in Kubernetes with `type=LoadBalancer`, cloud provider will create public IP and will set firewall.
When you will setup it and create SVC in Kubernetes with type=LoadBalancer, cloud provider will create public IP and will set firewall.
Note: Cloud provider run under VM service account, so this account needs to have correct permissions to be able to create all GCP resources.

View file

@ -36,6 +36,12 @@ The following diagram shows how traffic to the apiserver is directed.
![Image](figures/loadbalancer_localhost.png?raw=true)
Note: Kubernetes master nodes still use insecure localhost access because
there are bugs in Kubernetes <1.5.0 in using TLS auth on master role
services. This makes backends receiving unencrypted traffic and may be a
security issue when interconnecting different nodes, or maybe not, if those
belong to the isolated management network without external access.
A user may opt to use an external loadbalancer (LB) instead. An external LB
provides access for external clients, while the internal LB accepts client
connections only to the localhost.
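Pointing Kubespray at such an external LB is typically done with inventory variables along these lines (the domain name and address are placeholders):
```yaml
apiserver_loadbalancer_domain_name: "lb.example.com"
loadbalancer_apiserver:
  address: 203.0.113.10
  port: 6443
```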
@ -123,6 +129,11 @@ Kubespray has nothing to do with it, this is informational only.
As you can see, the masters' internal API endpoints are always
contacted via the local bind IP, which is `https://bip:sp`.
**Note** that for some cases, like healthchecks of applications deployed by
Kubespray, the masters' APIs are accessed via the insecure endpoint, which
consists of the local `kube_apiserver_insecure_bind_address` and
`kube_apiserver_insecure_port`.
## Optional configurations
### ETCD with a LB

View file

@ -17,9 +17,9 @@ The **kubernetes** version should be at least `v1.23.6` to have all the most rec
---
## kube-apiserver
authorization_modes: ['Node', 'RBAC']
authorization_modes: ['Node','RBAC']
# AppArmor-based OS
# kube_apiserver_feature_gates: ['AppArmor=true']
#kube_apiserver_feature_gates: ['AppArmor=true']
kube_apiserver_request_timeout: 120s
kube_apiserver_service_account_lookup: true
@ -41,18 +41,7 @@ kube_encrypt_secret_data: true
kube_encryption_resources: [secrets]
kube_encryption_algorithm: "secretbox"
kube_apiserver_enable_admission_plugins:
- EventRateLimit
- AlwaysPullImages
- ServiceAccount
- NamespaceLifecycle
- NodeRestriction
- LimitRanger
- ResourceQuota
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- PodNodeSelector
- PodSecurity
kube_apiserver_enable_admission_plugins: ['EventRateLimit,AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity']
kube_apiserver_admission_control_config_file: true
# EventRateLimit plugin configuration
kube_apiserver_admission_event_rate_limits:
@ -71,7 +60,7 @@ kube_profiling: false
kube_controller_manager_bind_address: 127.0.0.1
kube_controller_terminated_pod_gc_threshold: 50
# AppArmor-based OS
# kube_controller_feature_gates: ["RotateKubeletServerCertificate=true", "AppArmor=true"]
#kube_controller_feature_gates: ["RotateKubeletServerCertificate=true","AppArmor=true"]
kube_controller_feature_gates: ["RotateKubeletServerCertificate=true"]
## kube-scheduler
@ -79,7 +68,7 @@ kube_scheduler_bind_address: 127.0.0.1
kube_kubeadm_scheduler_extra_args:
profiling: false
# AppArmor-based OS
# kube_scheduler_feature_gates: ["AppArmor=true"]
#kube_scheduler_feature_gates: ["AppArmor=true"]
## etcd
etcd_deployment_type: kubeadm
@ -93,24 +82,7 @@ kubelet_event_record_qps: 1
kubelet_rotate_certificates: true
kubelet_streaming_connection_idle_timeout: "5m"
kubelet_make_iptables_util_chains: true
kubelet_feature_gates: ["RotateKubeletServerCertificate=true", "SeccompDefault=true"]
kubelet_seccomp_default: true
kubelet_systemd_hardening: true
# In case you have multiple interfaces in your
# control plane nodes and you want to specify the right
# IP addresses, kubelet_secure_addresses allows you
# to specify the IP from which the kubelet
# will receive the packets.
kubelet_secure_addresses: "192.168.10.110 192.168.10.111 192.168.10.112"
# additional configurations
kube_owner: root
kube_cert_group: root
# create a default Pod Security Configuration and deny running of insecure pods
# kube_system namespace is exempted by default
kube_pod_security_use_default: true
kube_pod_security_default_enforce: restricted
kubelet_feature_gates: ["RotateKubeletServerCertificate=true"]
```
Let's take a deep look to the resultant **kubernetes** configuration:
@ -120,8 +92,6 @@ Let's take a deep look to the resultant **kubernetes** configuration:
* The `encryption-provider-config` provide encryption at rest. This means that the `kube-apiserver` encrypt data that is going to be stored before they reach `etcd`. So the data is completely unreadable from `etcd` (in case an attacker is able to exploit this).
* The `rotateCertificates` in `KubeletConfiguration` is set to `true` along with `serverTLSBootstrap`. This could be used in alternative to `tlsCertFile` and `tlsPrivateKeyFile` parameters. Additionally it automatically generates certificates by itself, but you need to manually approve them or at least using an operator to do this (for more details, please take a look here: <https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/>).
* If you are installing **kubernetes** in an AppArmor-based OS (eg. Debian/Ubuntu) you can enable the `AppArmor` feature gate uncommenting the lines with the comment `# AppArmor-based OS` on top.
* The `kubelet_systemd_hardening`, both with `kubelet_secure_addresses` setup a minimal firewall on the system. To better understand how these variables work, here's an explanatory image:
![kubelet hardening](img/kubelet-hardening.png)
Once you have the file properly filled, you can run the **Ansible** command to start the installation:

Binary file not shown (image, 1.5 MiB before)
View file

@ -6,100 +6,84 @@
* List of all forked repos could be retrieved from github page of original project.
2. Add **forked repo** as submodule to desired folder in your existent ansible repo (for example 3d/kubespray):
```ShellSession
git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray
```
Git will create `.gitmodules` file in your existent ansible repo:
```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```
Git will create `.gitmodules` file in your existent ansible repo:
```ini
[submodule "3d/kubespray"]
path = 3d/kubespray
url = https://github.com/YOUR_GITHUB/kubespray.git
path = 3d/kubespray
url = https://github.com/YOUR_GITHUB/kubespray.git
```
3. Configure git to show submodule status:
```ShellSession
git config --global status.submoduleSummary true
```
```git config --global status.submoduleSummary true```
4. Add *original* kubespray repo as upstream:
```ShellSession
cd kubespray && git remote add upstream https://github.com/kubernetes-sigs/kubespray.git
```
```cd kubespray && git remote add upstream https://github.com/kubernetes-sigs/kubespray.git```
5. Sync your master branch with upstream:
```ShellSession
git checkout master
git fetch upstream
git merge upstream/master
git push origin master
git checkout master
git fetch upstream
git merge upstream/master
git push origin master
```
6. Create a new branch which you will use in your working environment:
```ShellSession
git checkout -b work
```
```git checkout -b work```
***Never*** use master branch of your repository for your commits.
7. Modify path to library and roles in your ansible.cfg file (role naming should be unique, you may have to rename your existent roles if they have same names as kubespray project),
if you had roles in your existing ansible project before, you can add the path to those separated with `:`:
```ini
8. ```ini
...
library = ./library/:3d/kubespray/library/
roles_path = ./roles/:3d/kubespray/roles/
...
```
8. Copy and modify configs from kubespray `group_vars` folder to corresponding `group_vars` folder in your existent project.
9. Copy and modify configs from kubespray `group_vars` folder to corresponding `group_vars` folder in your existent project.
You could rename *all.yml* config to something else, i.e. *kubespray.yml* and create corresponding group in your inventory file, which will include all hosts groups related to kubernetes setup.
You could rename *all.yml* config to something else, i.e. *kubespray.yml* and create corresponding group in your inventory file, which will include all hosts groups related to kubernetes setup.
9. Modify your ansible inventory file by adding mapping of your existent groups (if any) to kubespray naming.
10. Modify your ansible inventory file by adding mapping of your existent groups (if any) to kubespray naming.
For example:
```ini
...
#Kubespray groups:
[kube_node:children]
kubenode
```ini
...
#Kargo groups:
[kube_node:children]
kubenode
[k8s_cluster:children]
kubernetes
[k8s_cluster:children]
kubernetes
[etcd:children]
kubemaster
kubemaster-ha
[etcd:children]
kubemaster
kubemaster-ha
[kube_control_plane:children]
kubemaster
kubemaster-ha
[kube_control_plane:children]
kubemaster
kubemaster-ha
[kubespray:children]
kubernetes
```
[kubespray:children]
kubernetes
```
* Last entry here needed to apply kubespray.yml config file, renamed from all.yml of kubespray project.
* Last entry here needed to apply kubespray.yml config file, renamed from all.yml of kubespray project.
10. Now you can include kubespray tasks in you existent playbooks by including cluster.yml file:
11. Now you can include kubespray tasks in you existent playbooks by including cluster.yml file:
```yml
- name: Import kubespray playbook
ansible.builtin.import_playbook: 3d/kubespray/cluster.yml
```
```yml
- name: Include kubespray tasks
include: 3d/kubespray/cluster.yml
```
Or your could copy separate tasks from cluster.yml into your ansible repository.
Or your could copy separate tasks from cluster.yml into your ansible repository.
11. Commit changes to your ansible repo. Keep in mind, that submodule folder is just a link to the git commit hash of your forked repo.
When you update your "work" branch you need to commit changes to ansible repo as well.
12. Commit changes to your ansible repo. Keep in mind, that submodule folder is just a link to the git commit hash of your forked repo.
When you update your "work" branch you need to commit changes to ansible repo as well.
Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get actual code from submodule.
## Contributing
@ -111,78 +95,37 @@ If you made useful changes or fixed a bug in existent kubespray repo, use this f
2. Change working directory to git submodule directory (3d/kubespray).
3. Setup desired user.name and user.email for submodule.
If kubespray is only one submodule in your repo you could use something like:
```ShellSession
git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-address@used.for.cncf"'
```
If kubespray is only one submodule in your repo you could use something like:
```git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-address@used.for.cncf"'```
4. Sync with upstream master:
```ShellSession
git fetch upstream
git merge upstream/master
git push origin master
```
git fetch upstream
git merge upstream/master
git push origin master
```
5. Create new branch for the specific fixes that you want to contribute:
```ShellSession
git checkout -b fixes-name-date-index
```
Branch name should be self explaining to you, adding date and/or index will help you to track/delete your old PRs.
```git checkout -b fixes-name-date-index```
Branch name should be self explaining to you, adding date and/or index will help you to track/delete your old PRs.
6. Find git hash of your commit in "work" repo and apply it to newly created "fix" repo:
```ShellSession
git cherry-pick <COMMIT_HASH>
```
```ShellSession
git cherry-pick <COMMIT_HASH>
```
7. If you have several temporary-stage commits - squash them using [git rebase -i](https://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
Also you could use interactive rebase
```ShellSession
git rebase -i HEAD~10
```
to delete commits which you don't want to contribute into original repo.
7. If you have several temporary-stage commits - squash them using [```git rebase -i```](https://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
Also you could use interactive rebase (```git rebase -i HEAD~10```) to delete commits which you don't want to contribute into original repo.
8. When your changes is in place, you need to check upstream repo one more time because it could be changed during your work.
Check that you're on correct branch:
```git status```
And pull changes from upstream (if any):
```git pull --rebase upstream master```
Check that you're on correct branch:
9. Now push your changes to your **fork** repo with ```git push```. If your branch doesn't exists on github, git will propose you to use something like ```git push --set-upstream origin fixes-name-date-index```.
```ShellSession
git status
```
And pull changes from upstream (if any):
```ShellSession
git pull --rebase upstream master
```
9. Now push your changes to your **fork** repo with
```ShellSession
git push
```
If your branch doesn't exists on github, git will propose you to use something like
```ShellSession
git push --set-upstream origin fixes-name-date-index
```
10. Open you forked repo in browser, on the main page you will see proposition to create pull request for your newly created branch. Check proposed diff of your PR. If something is wrong you could safely delete "fix" branch on github using
```ShellSession
git push origin --delete fixes-name-date-index
git branch -D fixes-name-date-index
```
and start whole process from the beginning.
If everything is fine - add description about your changes (what they do and why they're needed) and confirm pull request creation.
10. Open you forked repo in browser, on the main page you will see proposition to create pull request for your newly created branch. Check proposed diff of your PR. If something is wrong you could safely delete "fix" branch on github using ```git push origin --delete fixes-name-date-index```, ```git branch -D fixes-name-date-index``` and start whole process from the beginning.
If everything is fine - add description about your changes (what they do and why they're needed) and confirm pull request creation.

View file

@ -2,14 +2,6 @@
kube-vip provides Kubernetes clusters with a virtual IP and load balancer for both the control plane (for building a highly-available cluster) and Kubernetes Services of type LoadBalancer without relying on any external hardware or software.
## Prerequisites
You have to configure `kube_proxy_strict_arp` when the kube_proxy_mode is `ipvs` and kube-vip ARP is enabled.
```yaml
kube_proxy_strict_arp: true
```
## Install
You have to explicitly enable the kube-vip extension:
@ -19,7 +11,7 @@ kube_vip_enabled: true
```
You also need to enable
[kube-vip as HA, Load Balancer, or both](https://kube-vip.io/docs/installation/static/#kube-vip-as-ha-load-balancer-or-both):
[kube-vip as HA, Load Balancer, or both](https://kube-vip.chipzoller.dev/docs/installation/static/#kube-vip-as-ha-load-balancer-or-both):
```yaml
# HA for control-plane, requires a VIP
@ -36,16 +28,16 @@ kube_vip_services_enabled: false
```
> Note: When using `kube-vip` as LoadBalancer for services,
[additional manual steps](https://kube-vip.io/docs/usage/cloud-provider/)
[additionnal manual steps](https://kube-vip.chipzoller.dev/docs/usage/cloud-provider/)
are needed.
If using [ARP mode](https://kube-vip.io/docs/installation/static/#arp) :
If using [ARP mode](https://kube-vip.chipzoller.dev/docs/installation/static/#arp) :
```yaml
kube_vip_arp_enabled: true
```
If using [BGP mode](https://kube-vip.io/docs/installation/static/#bgp) :
If using [BGP mode](https://kube-vip.chipzoller.dev/docs/installation/static/#bgp) :
```yaml
kube_vip_bgp_enabled: true

View file

@ -1,11 +1,10 @@
# Local Static Storage Provisioner
# Local Storage Provisioner
The [local static storage provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner)
The [local storage provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume)
is NOT a dynamic storage provisioner as you would
expect from a cloud provider. Instead, it simply creates PersistentVolumes for
all mounts under the `host_dir` of the specified storage class.
all mounts under the host_dir of the specified storage class.
These storage classes are specified in the `local_volume_provisioner_storage_classes` nested dictionary.
Example:
```yaml
@ -17,18 +16,15 @@ local_volume_provisioner_storage_classes:
host_dir: /mnt/fast-disks
mount_dir: /mnt/fast-disks
block_cleaner_command:
- "/scripts/shred.sh"
- "2"
- "/scripts/shred.sh"
- "2"
volume_mode: Filesystem
fs_type: ext4
```
For each key in `local_volume_provisioner_storage_classes` a "storage class" with
the same name is created in the entry `storageClassMap` of the ConfigMap `local-volume-provisioner`.
The subkeys of each storage class in `local_volume_provisioner_storage_classes`
are converted to camelCase and added as attributes to the storage class in the
ConfigMap.
For each key in `local_volume_provisioner_storage_classes` a storageClass with the
same name is created. The subkeys of each storage class are converted to camelCase and added
as attributes to the storageClass.
The result of the above example is:
```yaml
@ -47,85 +43,80 @@ data:
fsType: ext4
```
Additionally, a StorageClass object (`storageclasses.storage.k8s.io`) is also
created for each storage class:
```bash
$ kubectl get storageclasses.storage.k8s.io
NAME PROVISIONER RECLAIMPOLICY
fast-disks kubernetes.io/no-provisioner Delete
local-storage kubernetes.io/no-provisioner Delete
```
The default StorageClass is `local-storage` on `/mnt/disks`;
the rest of this documentation will use that path as an example.
The default StorageClass is local-storage on /mnt/disks,
the rest of this doc will use that path as an example.
## Examples to create local storage volumes
1. Using tmpfs
1. tmpfs method:
```bash
for vol in vol1 vol2 vol3; do
mkdir /mnt/disks/$vol
mount -t tmpfs -o size=5G $vol /mnt/disks/$vol
done
```
``` bash
for vol in vol1 vol2 vol3; do
mkdir /mnt/disks/$vol
mount -t tmpfs -o size=5G $vol /mnt/disks/$vol
done
```
The tmpfs method is not recommended for production because the mounts are not
persistent and data will be deleted on reboot.
The tmpfs method is not recommended for production because the mount is not
persistent and data will be deleted on reboot.
1. Mount physical disks
```bash
mkdir /mnt/disks/ssd1
mount /dev/vdb1 /mnt/disks/ssd1
```
``` bash
mkdir /mnt/disks/ssd1
mount /dev/vdb1 /mnt/disks/ssd1
```
Physical disks are recommended for production environments because it offers
complete isolation in terms of I/O and capacity.
Physical disks are recommended for production environments because it offers
complete isolation in terms of I/O and capacity.
1. Mount unpartitioned physical devices
```bash
for disk in /dev/sdc /dev/sdd /dev/sde; do
ln -s $disk /mnt/disks
done
```
``` bash
for disk in /dev/sdc /dev/sdd /dev/sde; do
ln -s $disk /mnt/disks
done
```
This saves time of precreating filesystems. Note that your storageclass must have
`volume_mode` set to `"Filesystem"` and `fs_type` defined. If either is not set, the
disk will be added as a raw block device.
1. PersistentVolumes with `volumeMode="Block"`
Just like above, you can create PersistentVolumes with volumeMode `Block`
by creating a symbolic link under discovery directory to the block device on
the node, if you set `volume_mode` to `"Block"`. This will create a volume
presented into a Pod as a block device, without any filesystem on it.
This saves time of precreating filesystems. Note that your storageclass must have
volume_mode set to "Filesystem" and fs_type defined. If either is not set, the
disk will be added as a raw block device.
1. File-backed sparsefile method
```bash
truncate /mnt/disks/disk5 --size 2G
mkfs.ext4 /mnt/disks/disk5
mkdir /mnt/disks/vol5
mount /mnt/disks/disk5 /mnt/disks/vol5
```
``` bash
truncate /mnt/disks/disk5 --size 2G
mkfs.ext4 /mnt/disks/disk5
mkdir /mnt/disks/vol5
mount /mnt/disks/disk5 /mnt/disks/vol5
```
If you have a development environment and only one disk, this is the best way
to limit the quota of persistent volumes.
If you have a development environment and only one disk, this is the best way
to limit the quota of persistent volumes.
1. Simple directories
In a development environment, using `mount --bind` works also, but there is no capacity
management.
In a development environment using `mount --bind` works also, but there is no capacity
management.
1. Block volumeMode PVs
Create a symbolic link under discovery directory to the block device on the node. To use
raw block devices in pods, volume_type should be set to "Block".
## Usage notes
Make sure to make any mounts persist via `/etc/fstab` or with systemd mounts (for
Flatcar Container Linux or Fedora CoreOS). Pods with persistent volume claims will not be
Beta PV.NodeAffinity field is used by default. If running against an older K8s
version, the useAlphaAPI flag must be set in the configMap.
The volume provisioner cannot calculate volume sizes correctly, so you should
delete the daemonset pod on the relevant host after creating volumes. The pod
will be recreated and read the size correctly.
Make sure to make any mounts persist via /etc/fstab or with systemd mounts (for
Flatcar Container Linux). Pods with persistent volume claims will not be
able to start if the mounts become unavailable.
## Further reading
Refer to the upstream docs here: <https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner>
Refer to the upstream docs here: <https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume>

View file

@ -29,7 +29,8 @@ use Kubernetes's `PersistentVolume` abstraction. The following template is
expanded by `salt` in the GCE cluster turnup, but can easily be adapted to
other situations:
```yaml
<!-- BEGIN MUNGE: EXAMPLE registry-pv.yaml.in -->
``` yaml
kind: PersistentVolume
apiVersion: v1
metadata:
@ -45,6 +46,7 @@ spec:
fsType: "ext4"
{% endif %}
```
<!-- END MUNGE: EXAMPLE registry-pv.yaml.in -->
If, for example, you wanted to use NFS you would just need to change the
`gcePersistentDisk` block to `nfs`. See
@ -66,7 +68,8 @@ Now that the Kubernetes cluster knows that some storage exists, you can put a
claim on that storage. As with the `PersistentVolume` above, you can start
with the `salt` template:
```yaml
<!-- BEGIN MUNGE: EXAMPLE registry-pvc.yaml.in -->
``` yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
@ -79,6 +82,7 @@ spec:
requests:
storage: {{ pillar['cluster_registry_disk_size'] }}
```
<!-- END MUNGE: EXAMPLE registry-pvc.yaml.in -->
This tells Kubernetes that you want to use storage, and the `PersistentVolume`
you created before will be bound to this claim (unless you have other
@ -89,7 +93,8 @@ gives you the right to use this storage until you release the claim.
Now we can run a Docker registry:
```yaml
<!-- BEGIN MUNGE: EXAMPLE registry-rc.yaml -->
``` yaml
apiVersion: v1
kind: ReplicationController
metadata:
@ -133,6 +138,7 @@ spec:
persistentVolumeClaim:
claimName: kube-registry-pvc
```
<!-- END MUNGE: EXAMPLE registry-rc.yaml -->
*Note:* that if you have set multiple replicas, make sure your CSI driver has support for the `ReadWriteMany` accessMode.
@ -140,7 +146,8 @@ spec:
Now that we have a registry `Pod` running, we can expose it as a Service:
```yaml
<!-- BEGIN MUNGE: EXAMPLE registry-svc.yaml -->
``` yaml
apiVersion: v1
kind: Service
metadata:
@ -157,6 +164,7 @@ spec:
port: 5000
protocol: TCP
```
<!-- END MUNGE: EXAMPLE registry-svc.yaml -->
## Expose the registry on each node
@ -164,7 +172,8 @@ Now that we have a running `Service`, we need to expose it onto each Kubernetes
`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every
node by creating following daemonset.
```yaml
<!-- BEGIN MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
``` yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
@ -198,6 +207,7 @@ spec:
containerPort: 80
hostPort: 5000
```
<!-- END MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
When modifying replication-controller, service and daemon-set definitions, take
care to ensure *unique* identifiers for the rc-svc couple and the daemon-set.
@ -209,7 +219,7 @@ This ensures that port 5000 on each node is directed to the registry `Service`.
You should be able to verify that it is running by hitting port 5000 with a web
browser and getting a 404 error:
```ShellSession
``` console
$ curl localhost:5000
404 page not found
```
@ -219,7 +229,7 @@ $ curl localhost:5000
To use an image hosted by this registry, simply say this in your `Pod`'s
`spec.containers[].image` field:
```yaml
``` yaml
image: localhost:5000/user/container
```
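For example, a minimal (hypothetical) Pod spec that pulls through the node-local registry proxy could look like this:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: registry-demo            # hypothetical name
spec:
  containers:
    - name: app
      image: localhost:5000/user/container   # resolved via the kube-registry proxy on each node
```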
@ -231,7 +241,7 @@ building locally and want to push to your cluster.
You can use `kubectl` to set up a port-forward from your local node to a
running Pod:
```ShellSession
``` console
$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=registry \
-o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
| grep Running | head -1 | cut -f1 -d' ')


@ -1,11 +0,0 @@
# Kylin Linux
Kylin Linux is supported with docker and containerd runtimes.
**Note:** Kylin Linux is not currently covered by Kubespray CI, so support for it is
currently considered experimental.
At present, only `Kylin Linux Advanced Server V10 (Sword)` has been adapted; it supports deployment on both aarch64 and x86_64 platforms.
There are no special considerations for using Kylin Linux as the target OS
for Kubespray deployments.


@ -2,7 +2,7 @@
MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation.
It allows you to create Kubernetes services of type "LoadBalancer" in clusters that don't run on a cloud provider, and thus cannot simply hook into 3rd party products to provide load-balancers.
The default operating mode of MetalLB is in ["Layer2"](https://metallb.universe.tf/concepts/layer2/) but it can also operate in ["BGP"](https://metallb.universe.tf/concepts/bgp/) mode.
The default operationg mode of MetalLB is in ["Layer2"](https://metallb.universe.tf/concepts/layer2/) but it can also operate in ["BGP"](https://metallb.universe.tf/concepts/bgp/) mode.
## Prerequisites
@ -19,7 +19,6 @@ You have to explicitly enable the MetalLB extension and set an IP address range
```yaml
metallb_enabled: true
metallb_speaker_enabled: true
metallb_avoid_buggy_ips: true
metallb_ip_range:
- 10.5.0.0/16
```
@ -70,17 +69,16 @@ metallb_peers:
When using calico >= 3.18 you can replace the MetalLB speaker with calico Service LoadBalancer IP advertisement.
See [calico service IPs advertisement documentation](https://docs.projectcalico.org/archive/v3.18/networking/advertise-service-ips#advertise-service-load-balancer-ip-addresses).
In this scenario you should disable the MetalLB speaker and configure the `calico_advertise_service_loadbalancer_ips` to match your `metallb_ip_range`
In this scenarion you should disable the MetalLB speaker and configure the `calico_advertise_service_loadbalancer_ips` to match your `metallb_ip_range`
```yaml
metallb_speaker_enabled: false
metallb_avoid_buggy_ips: true
metallb_ip_range:
- 10.5.0.0/16
calico_advertise_service_loadbalancer_ips: "{{ metallb_ip_range }}"
```
If you have additional loadbalancer IP pool in `metallb_additional_address_pools` , ensure to add them to the list.
If you have additional loadbalancer IP pool in `metallb_additional_address_pools`, ensure to add them to the list.
```yaml
metallb_speaker_enabled: false
@ -92,13 +90,11 @@ metallb_additional_address_pools:
- 10.6.0.0/16
protocol: "bgp"
auto_assign: false
avoid_buggy_ips: true
kube_service_pool_2:
ip_range:
- 10.10.0.0/16
protocol: "bgp"
auto_assign: false
avoid_buggy_ips: true
calico_advertise_service_loadbalancer_ips:
- 10.5.0.0/16
- 10.6.0.0/16


@ -1,66 +0,0 @@
# Public Download Mirror
The public mirror is useful for downloading public resources quickly in some areas of the world (such as China).
## Configuring Kubespray to use a mirror site
You can follow the [offline](offline-environment.md) guide to configure the image/file downloads to point at the public mirror site. If you want to download quickly in China, the configuration can look like this:
```shell
gcr_image_repo: "gcr.m.daocloud.io"
kube_image_repo: "k8s.m.daocloud.io"
docker_image_repo: "docker.m.daocloud.io"
quay_image_repo: "quay.m.daocloud.io"
github_image_repo: "ghcr.m.daocloud.io"
files_repo: "https://files.m.daocloud.io"
```
Use mirror sites only if you trust the provider. The Kubespray team cannot verify their reliability or security.
You can replace the `m.daocloud.io` with any site you want.
## Example Usage Full Steps
You can follow the full steps below to use Kubespray with the mirror. For example:
Install Ansible according to the Ansible installation guide, then run the following steps:
```shell
# Copy ``inventory/sample`` as ``inventory/mycluster``
cp -rfp inventory/sample inventory/mycluster
# Update Ansible inventory file with inventory builder
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
# Use the download mirror
cp inventory/mycluster/group_vars/all/offline.yml inventory/mycluster/group_vars/all/mirror.yml
sed -i -E '/# .*\{\{ files_repo/s/^# //g' inventory/mycluster/group_vars/all/mirror.yml
tee -a inventory/mycluster/group_vars/all/mirror.yml <<EOF
gcr_image_repo: "gcr.m.daocloud.io"
kube_image_repo: "k8s.m.daocloud.io"
docker_image_repo: "docker.m.daocloud.io"
quay_image_repo: "quay.m.daocloud.io"
github_image_repo: "ghcr.m.daocloud.io"
files_repo: "https://files.m.daocloud.io"
EOF
# Review and change parameters under ``inventory/mycluster/group_vars``
cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/,
# installing packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```
The above steps simply add the "Use the download mirror" step to the steps in [README.md](../README.md).
## Community-run mirror sites
DaoCloud(China)
* [image-mirror](https://github.com/DaoCloud/public-image-mirror)
* [files-mirror](https://github.com/DaoCloud/public-binary-files-mirror)


@ -124,7 +124,7 @@ to
With the old node still in the inventory, run `remove-node.yml`. You need to pass `-e node=node-1` to the playbook to limit the execution to the node being removed.
If the node you want to remove is not online, you should add `reset_nodes=false` and `allow_ungraceful_removal=true` to your extra-vars.
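As a sketch (the file name and node name are illustrative), the extra vars for removing an unreachable node can be collected in a file and passed with `-e @remove-node-vars.yml`:

```yaml
# remove-node-vars.yml -- illustrative extra vars for removing an unreachable node
node: node-1
reset_nodes: false
allow_ungraceful_removal: true
```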
### 3) Edit cluster-info configmap in kube-public namespace
### 3) Edit cluster-info configmap in kube-system namespace
`kubectl edit cm -n kube-public cluster-info`


@ -1,50 +0,0 @@
# NTP synchronization
The Network Time Protocol (NTP) is a networking protocol for clock synchronization between computer systems. Time synchronization is important to Kubernetes and Etcd.
## Enable the NTP
To start the ntpd (or chrony) service and enable it at system boot, there is a dedicated variable:
```ShellSession
ntp_enabled: true
```
The NTP service will then be enabled and will sync time automatically.
## Customize the NTP configure file
In an air-gapped environment, the nodes cannot reach NTP servers on the internet, so they can use a customized NTP server by configuring the NTP file.
```ShellSession
ntp_enabled: true
ntp_manage_config: true
ntp_servers:
- "0.your-ntp-server.org iburst"
- "1.your-ntp-server.org iburst"
- "2.your-ntp-server.org iburst"
- "3.your-ntp-server.org iburst"
```
## Setting the TimeZone
The timezone can also be set with `ntp_timezone`, e.g. "Etc/UTC" or "Asia/Shanghai". If not set, the timezone will not be changed.
```ShellSession
ntp_enabled: true
ntp_timezone: Etc/UTC
```
## Advanced Configure
Enabling `tinker panic` is useful when running NTP in a VM environment to avoid clock drift on VMs. It only takes effect when `ntp_manage_config` is true.
```ShellSession
ntp_tinker_panic: true
```
Force an immediate time sync after NTP is installed, which is useful on a newly installed system.
```ShellSession
ntp_force_sync_immediately: true
```


@ -1,25 +1,12 @@
# Offline environment
In case your servers don't have access to the internet directly (for example
when deploying on premises with security constraints), you need to get the
following artifacts in advance from another environment that has access to the internet.
* Some static files (zips and binaries)
* OS packages (rpm/deb files)
* Container images used by Kubespray. Exhaustive list depends on your setup
* [Optional] Python packages used by Kubespray (only required if your OS doesn't provide all python packages/versions listed in `requirements.txt`)
* [Optional] Helm chart files (only required if `helm_enabled=true`)
Then you need to setup the following services on your offline environment:
In case your servers don't have access to internet (for example when deploying on premises with security constraints), you need to setup:
* a HTTP reverse proxy/cache/mirror to serve some static files (zips and binaries)
* an internal Yum/Deb repository for OS packages
* an internal container image registry that needs to be populated with all container images used by Kubespray
* [Optional] an internal PyPi server for python packages used by Kubespray
* [Optional] an internal Helm registry for Helm chart files
You can get artifact lists with [generate_list.sh](/contrib/offline/generate_list.sh) script.
In addition, you can find some tools for offline deployment under [contrib/offline](/contrib/offline/README.md).
* an internal container image registry that need to be populated with all container images used by Kubespray. Exhaustive list depends on your setup
* [Optional] an internal PyPi server for kubespray python packages (only required if your OS doesn't provide all python packages/versions listed in `requirements.txt`)
* [Optional] an internal Helm registry (only required if `helm_enabled=true`)
## Configure Inventory
@ -36,7 +23,7 @@ kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"
# etcd is optional if you **DON'T** use etcd_deployment=host
etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
# If using Calico


@ -1,11 +0,0 @@
# OpenEuler
[OpenEuler](https://www.openeuler.org/en/) Linux is supported with docker and containerd runtimes.
**Note:** OpenEuler Linux is not currently covered by Kubespray CI, so support for it is
currently considered experimental.
At present, only `openEuler 22.03 LTS` has been adapted; it supports deployment on both aarch64 and x86_64 platforms.
There are no special considerations for using OpenEuler Linux as the target OS
for Kubespray deployments.


@ -34,6 +34,52 @@ Otherwise [cinder](https://wiki.openstack.org/wiki/Cinder) won't work as expecte
Unless you are using calico or kube-router you can now run the playbook.
**Additional step needed when using calico or kube-router:**
Being L3 CNIs, calico and kube-router do not encapsulate packets with the hosts' IP addresses. Instead the packets are routed with the pods' IP addresses directly.
OpenStack will filter and drop all packets from ips it does not know to prevent spoofing.
In order to make L3 CNIs work on OpenStack you will need to tell OpenStack to allow pods packets by allowing the network they use.
First you will need the ids of your OpenStack instances that will run kubernetes:
```bash
openstack server list --project YOUR_PROJECT
+--------------------------------------+--------+----------------------------------+--------+-------------+
| ID | Name | Tenant ID | Status | Power State |
+--------------------------------------+--------+----------------------------------+--------+-------------+
| e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
| 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
```
Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (though they are now configured through using OpenStack):
```bash
openstack port list -c id -c device_id --project YOUR_PROJECT
+--------------------------------------+--------------------------------------+
| id | device_id |
+--------------------------------------+--------------------------------------+
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
```
Given the port ids on the left, you can set the two `allowed-address`(es) in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`.)
```bash
# allow kube_service_addresses and kube_pods_subnet network
openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
```
If all the VMs in the tenant correspond to Kubespray deployment, you can "sweep run" above with:
```bash
openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
```
Now you can finally run the playbook.
## The external cloud provider
The in-tree cloud provider is deprecated and will be removed in a future version of Kubernetes. The target release for removing all remaining in-tree cloud providers is set to 1.21.
@ -110,49 +156,3 @@ The new cloud provider is configured to have Octavia by default in Kubespray.
- Run `source path/to/your/openstack-rc` to read your OpenStack credentials like `OS_AUTH_URL`, `OS_USERNAME`, `OS_PASSWORD`, etc. Those variables are used for accessing OpenStack from the external cloud provider.
- Run the `cluster.yml` playbook
## Additional step needed when using calico or kube-router
Being L3 CNIs, calico and kube-router do not encapsulate packets with the hosts' IP addresses. Instead the packets are routed with the pods' IP addresses directly.
OpenStack will filter and drop all packets from ips it does not know to prevent spoofing.
In order to make L3 CNIs work on OpenStack you will need to tell OpenStack to allow pods packets by allowing the network they use.
First you will need the ids of your OpenStack instances that will run kubernetes:
```bash
openstack server list --project YOUR_PROJECT
+--------------------------------------+--------+----------------------------------+--------+-------------+
| ID | Name | Tenant ID | Status | Power State |
+--------------------------------------+--------+----------------------------------+--------+-------------+
| e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
| 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
```
Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (though they are now configured through using OpenStack):
```bash
openstack port list -c id -c device_id --project YOUR_PROJECT
+--------------------------------------+--------------------------------------+
| id | device_id |
+--------------------------------------+--------------------------------------+
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
```
Given the port ids on the left, you can set the two `allowed-address`(es) in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`.)
```bash
# allow kube_service_addresses and kube_pods_subnet network
openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
```
If all the VMs in the tenant correspond to Kubespray deployment, you can "sweep run" above with:
```bash
openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
```
Now you can finally run the playbook.

View file

@ -14,8 +14,8 @@ hands-on guide to get started with Kubespray.
## Cluster Details
* [kubespray](https://github.com/kubernetes-sigs/kubespray)
* [kubernetes](https://github.com/kubernetes/kubernetes)
* [kubespray](https://github.com/kubernetes-sigs/kubespray) v2.17.x
* [kubernetes](https://github.com/kubernetes/kubernetes) v1.17.9
## Prerequisites
@ -252,7 +252,11 @@ Ansible will now execute the playbook, this can take up to 20 minutes.
We will leverage a kubeconfig file from one of the controller nodes to access
the cluster as administrator from our local workstation.
> In this simplified set-up, we did not include a load balancer that usually sits on top of the three controller nodes for a high available API server endpoint. In this simplified tutorial we connect directly to one of the three controllers.
> In this simplified set-up, we did not include a load balancer that usually
sits on top of the
three controller nodes for a high available API server endpoint. In this
simplified tutorial we connect directly to one of the three
controllers.
First, we need to edit the permission of the kubeconfig file on one of the
controller nodes:
@ -466,7 +470,7 @@ kubectl logs $POD_NAME
#### Exec
In this section you will verify the ability to [execute commands in a container](https://kubernetes.io/docs/tasks/debug/debug-application/get-shell-running-container/#running-individual-commands-in-a-container).
In this section you will verify the ability to [execute commands in a container](https://kubernetes.io/docs/tasks/debug-application-cluster/get-shell-running-container/#running-individual-commands-in-a-container).
Print the nginx version by executing the `nginx -v` command in the `nginx` container:


@ -1,9 +0,0 @@
# UOS Linux
UOS Linux(UnionTech OS Server 20) is supported with docker and containerd runtimes.
**Note:** UOS Linux is not currently covered by Kubespray CI, so support for it is
currently considered experimental.
There are no special considerations for using UOS Linux as the target OS
for Kubespray deployments.


@ -58,7 +58,7 @@ see [download documentation](/docs/downloads.md).
The following is an example of setting up and running kubespray using `vagrant`.
For repeated runs, you could save the script to a file in the root of the
kubespray and run it by executing `source <name_of_the_file>`.
kubespray and run it by executing 'source <name_of_the_file>.
```ShellSession
# use virtualenv to install all python requirements


@ -15,7 +15,7 @@ Some variables of note include:
* *calico_version* - Specify version of Calico to use
* *calico_cni_version* - Specify version of Calico CNI plugin to use
* *docker_version* - Specify version of Docker to use (should be quoted
* *docker_version* - Specify version of Docker to used (should be quoted
string). Must match one of the keys defined for *docker_versioned_pkg*
in `roles/container-engine/docker/vars/*.yml`.
* *containerd_version* - Specify version of containerd to use when setting `container_manager` to `containerd`
@ -28,7 +28,6 @@ Some variables of note include:
* *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
* *kube_version* - Specify a given Kubernetes version
* *searchdomains* - Array of DNS domains to search when looking up hostnames
* *remove_default_searchdomains* - Boolean that removes the default searchdomain
* *nameservers* - Array of nameservers to use for DNS lookup
* *preinstall_selinux_state* - Set selinux state, permitted values are permissive, enforcing and disabled.
@ -82,7 +81,7 @@ following default cluster parameters:
raise an assertion in playbooks if the `kubelet_max_pods` var also isn't adjusted accordingly
(assertion not applicable to calico which doesn't use this as a hard limit, see
[Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes).
* *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services.
* *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``.
@ -100,7 +99,7 @@ following default cluster parameters:
* *coredns_k8s_external_zone* - Zone that will be used when CoreDNS k8s_external plugin is enabled
(default is k8s_external.local)
* *enable_coredns_k8s_endpoint_pod_names* - If enabled, it configures endpoint_pod_names option for kubernetes plugin.
on the CoreDNS service.
@ -167,9 +166,7 @@ variables to match your requirements.
addition to Kubespray deployed DNS
* *nameservers* - Array of DNS servers configured for use by hosts
* *searchdomains* - Array of up to 4 search domains
* *remove_default_searchdomains* - Boolean. If enabled, `searchdomains` variable can hold 6 search domains.
* *dns_etchosts* - Content of hosts file for coredns and nodelocaldns
* *dns_upstream_forward_extra_opts* - Options to add in the forward section of coredns/nodelocaldns related to upstream DNS servers
For more information, see [DNS
Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.md).
@ -178,47 +175,26 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
* *docker_options* - Commonly used to set
``--insecure-registry=myregistry.mydomain:5000``
* *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install.
* *containerd_default_runtime* - If defined, changes the default Containerd runtime used by the Kubernetes CRI plugin.
* *containerd_default_runtime* - Sets the default Containerd runtime used by the Kubernetes CRI plugin.
* *containerd_additional_runtimes* - Sets the additional Containerd runtimes used by the Kubernetes CRI plugin.
[Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overridden in inventory vars.
[Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overriden in inventory vars.
* *http_proxy/https_proxy/no_proxy/no_proxy_exclude_workers/additional_no_proxy* - Proxy variables for deploying behind a
proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
that correspond to each node.
* *kubelet_cgroup_driver* - Allows manual override of the cgroup-driver option for Kubelet.
By default autodetection is used to match container manager configuration.
`systemd` is the preferred driver for `containerd` though it can have issues with `cgroups v1` and `kata-containers` in which case you may want to change to `cgroupfs`.
* *kubelet_rotate_certificates* - Auto rotate the kubelet client certificates by requesting new certificates
from the kube-apiserver when the certificate expiration approaches.
* *kubelet_rotate_server_certificates* - Auto rotate the kubelet server certificates by requesting new certificates
from the kube-apiserver when the certificate expiration approaches.
**Note** that server certificates are **not** approved automatically. Approve them manually
(`kubectl get csr`, `kubectl certificate approve`) or implement custom approving controller like
[kubelet-rubber-stamp](https://github.com/kontena/kubelet-rubber-stamp).
* *kubelet_streaming_connection_idle_timeout* - Set the maximum time a streaming connection can be idle before the connection is automatically closed.
* *kubelet_make_iptables_util_chains* - If `true`, the kubelet ensures a set of `iptables` utility chains is present on the host.
* *kubelet_systemd_hardening* - If `true`, hardens the kubelet systemd service with security features for isolation.
**N.B.** To enable this feature, ensure you are using **`cgroup v2`** on your system. Check with the command: `sudo ls -l /sys/fs/cgroup/*.slice`. If the directory does not exist, enable cgroup v2 following this guide: [enable cgroup v2](https://rootlesscontaine.rs/getting-started/common/cgroup2/#enabling-cgroup-v2).
* *kubelet_secure_addresses* - By default *kubelet_systemd_hardening* sets the **control plane** `ansible_host` IPs as the `kubelet_secure_addresses`. In case you have multiple interfaces on your control plane nodes and the `kube-apiserver` is not bound to the default interface, you can override them with this variable.
Example:
The **control plane** node may have 2 interfaces with the following IP addresses: `eth0:10.0.0.110`, `eth1:192.168.1.110`.
By default `kubelet_secure_addresses` is set to `10.0.0.110`, since the ansible control host uses `eth0` to connect to the machine. If you want `eth1` to be the outgoing interface on which `kube-apiserver` connects to the `kubelet`s, you should override the variable like this: `kubelet_secure_addresses: "192.168.1.110"`.
* *node_labels* - Labels applied to nodes via `kubectl label node`.
* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
For example, labels can be set in the inventory as variables or more widely in group_vars.
*node_labels* can only be defined as a dict:
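A minimal sketch (label keys and values are illustrative):

```yaml
node_labels:
  environment: dev
  disktype: ssd
```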


@ -31,13 +31,12 @@ You need to source the vSphere credentials you use to deploy your machines that
| vsphere_csi_controller_replicas | TRUE | integer | | 1 | Number of pods Kubernetes should deploy for the CSI controller |
| vsphere_csi_liveness_probe_image_tag | TRUE | string | | "v2.2.0" | CSI liveness probe image tag to use |
| vsphere_csi_provisioner_image_tag | TRUE | string | | "v2.1.0" | CSI provisioner image tag to use |
| vsphere_csi_node_driver_registrar_image_tag | TRUE | string | | "v1.1.0" | CSI node driver registrar image tag to use |
| vsphere_csi_node_driver_registrar_image_tag | TRUE | string | | "v1.1.0" | CSI node driver registrat image tag to use |
| vsphere_csi_driver_image_tag | TRUE | string | | "v1.0.2" | CSI driver image tag to use |
| vsphere_csi_resizer_tag | TRUE | string | | "v1.1.0" | CSI resizer image tag to use
| vsphere_csi_aggressive_node_drain | FALSE | boolean | | false | Enable aggressive node drain strategy |
| vsphere_csi_aggressive_node_unreachable_timeout | FALSE | int | 300 | | Timeout until the node is drained when it is in an unreachable state |
| vsphere_csi_aggressive_node_not_ready_timeout | FALSE | int | 300 | | Timeout until the node is drained when it is in a not-ready state |
| vsphere_csi_namespace | TRUE | string | | "kube-system" | vSphere CSI namespace to use; kube-system for backward compatibility, should be changed to vmware-system-csi in the long run |
## Usage example


@ -21,14 +21,14 @@ After this step you should have:
### Kubespray configuration
First in `inventory/sample/group_vars/all/all.yml` you must set the cloud provider to `external` and external_cloud_provider to `external_cloud_provider`.
First in `inventory/sample/group_vars/all.yml` you must set the cloud provider to `external` and external_cloud_provider to `external_cloud_provider`.
```yml
cloud_provider: "external"
external_cloud_provider: "vsphere"
```
Then, `inventory/sample/group_vars/all/vsphere.yml`, you need to declare your vCenter credentials and enable the vSphere CSI following the description below.
Then, `inventory/sample/group_vars/vsphere.yml`, you need to declare your vCenter credentials and enable the vSphere CSI following the description below.
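For illustration (all values below are placeholders; see the table that follows for the full list of variables), the vSphere section could look like:

```yml
# placeholder values -- adjust to your vCenter environment
external_vsphere_vcenter_ip: "myvcenter.domain.com"
external_vsphere_user: "administrator@vsphere.local"
external_vsphere_password: "changeme"
external_vsphere_datacenter: "DATACENTER_name"
external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"
vsphere_csi_enabled: true
```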
| Variable | Required | Type | Choices | Default | Comment |
|----------------------------------------|----------|---------|----------------------------|---------------------------|---------------------------------------------------------------------------------------------------------------------|


@ -1,140 +0,0 @@
---
## Directory where the binaries will be installed
bin_dir: /usr/local/bin
## The access_ip variable is used to define how other nodes should access
## the node. This is used in flannel to allow other flannel nodes to see
## this node for example. The access_ip is really useful AWS and Google
## environments where the nodes are accessed remotely by the "public" ip,
## but don't know about that address themselves.
# access_ip: 1.1.1.1
## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
# loadbalancer_apiserver:
# address: 1.2.3.4
# port: 1234
## Internal loadbalancers for apiservers
# loadbalancer_apiserver_localhost: true
# valid options are "nginx" or "haproxy"
# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy"
## If the cilium is going to be used in strict mode, we can use the
## localhost connection and not use the external LB. If this parameter is
## not specified, the first node to connect to kubeapi will be used.
# use_localhost_as_kubeapi_loadbalancer: true
## Local loadbalancer should use this port
## And must be set port 6443
loadbalancer_apiserver_port: 6443
## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx.
loadbalancer_apiserver_healthcheck_port: 8081
### OTHER OPTIONAL VARIABLES
## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries.
## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage. However, it still uses the nameservers to make sure the cluster installs safely in the dns_early stage.
## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail.
# disable_host_nameservers: false
## Upstream dns servers
# upstream_dns_servers:
# - 8.8.8.8
# - 8.8.4.4
## There are some changes specific to the cloud providers
## for instance we need to encapsulate packets with some network plugins
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
## When openstack is used make sure to source in the openstack credentials
## like you would do when using openstack-client before starting the playbook.
# cloud_provider:
## When cloud_provider is set to 'external', you can set the cloud controller to deploy
## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud'
## When openstack or vsphere are used make sure to source in the required fields
# external_cloud_provider:
## Set these proxy values in order to update package manager and docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""
## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
# no_proxy: ""
## Some problems may occur when downloading files over https proxy due to ansible bug
## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
## SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
# download_validate_certs: False
## If you need exclude all cluster nodes from proxy and other resources, add other resources here.
# additional_no_proxy: ""
## If you need to disable proxying of os package repositories but are still behind an http_proxy set
## skip_http_proxy_on_os_packages to true
## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu
## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt packages will be installed from the source of your choice
# skip_http_proxy_on_os_packages: false
## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the
## no_proxy variable, set below to true:
no_proxy_exclude_workers: false
## Certificate Management
## This setting determines whether certs are generated via scripts.
## Choose 'none' if you provide your own certificates.
## Option is "script", "none"
# cert_management: script
## Set to true to allow pre-checks to fail and continue deployment
# ignore_assert_errors: false
## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
# kube_read_only_port: 10255
## Set true to download and cache container
# download_container: true
## Deploy container engine
# Set false if you want to deploy container engine manually.
# deploy_container_engine: true
## Red Hat Enterprise Linux subscription registration
## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination
## Update RHEL subscription purpose usage, role and SLA if necessary
# rh_subscription_username: ""
# rh_subscription_password: ""
# rh_subscription_org_id: ""
# rh_subscription_activation_key: ""
# rh_subscription_usage: "Development"
# rh_subscription_role: "Red Hat Enterprise Server"
# rh_subscription_sla: "Self-Support"
## Check if access_ip responds to ping. Set false if your firewall blocks ICMP.
# ping_access_ip: true
# sysctl_file_path to add sysctl conf to
# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
kube_webhook_token_auth: false
kube_webhook_token_auth_url_skip_tls_verify: false
# kube_webhook_token_auth_url: https://...
## base64-encoded string of the webhook's CA certificate
# kube_webhook_token_auth_ca_data: "LS0t..."
## NTP Settings
# Start the ntpd or chrony service and enable it at system boot.
ntp_enabled: false
ntp_manage_config: false
ntp_servers:
- "0.pool.ntp.org iburst"
- "1.pool.ntp.org iburst"
- "2.pool.ntp.org iburst"
- "3.pool.ntp.org iburst"
## Used to control no_log attribute
unsafe_show_logs: false


@ -1,9 +0,0 @@
## To use AWS EBS CSI Driver to provision volumes, uncomment the first value
## and configure the parameters below
# aws_ebs_csi_enabled: true
# aws_ebs_csi_enable_volume_scheduling: true
# aws_ebs_csi_enable_volume_snapshot: false
# aws_ebs_csi_enable_volume_resizing: false
# aws_ebs_csi_controller_replicas: 1
# aws_ebs_csi_plugin_image_tag: latest
# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment"


@ -1,40 +0,0 @@
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values
# azure_cloud:
# azure_tenant_id:
# azure_subscription_id:
# azure_aad_client_id:
# azure_aad_client_secret:
# azure_resource_group:
# azure_location:
# azure_subnet_name:
# azure_security_group_name:
# azure_security_group_resource_group:
# azure_vnet_name:
# azure_vnet_resource_group:
# azure_route_table_name:
# azure_route_table_resource_group:
# supported values are 'standard' or 'vmss'
# azure_vmtype: standard
## Azure Disk CSI credentials and parameters
## see docs/azure-csi.md for details on how to get these values
# azure_csi_tenant_id:
# azure_csi_subscription_id:
# azure_csi_aad_client_id:
# azure_csi_aad_client_secret:
# azure_csi_location:
# azure_csi_resource_group:
# azure_csi_vnet_name:
# azure_csi_vnet_resource_group:
# azure_csi_subnet_name:
# azure_csi_security_group_name:
# azure_csi_use_instance_metadata:
# azure_csi_tags: "Owner=owner,Team=team,Environment=environment"
## To enable Azure Disk CSI, uncomment below
# azure_csi_enabled: true
# azure_csi_controller_replicas: 1
# azure_csi_plugin_image_tag: latest


@ -1,50 +0,0 @@
---
# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options
# containerd_storage_dir: "/var/lib/containerd"
# containerd_state_dir: "/run/containerd"
# containerd_oom_score: 0
# containerd_default_runtime: "runc"
# containerd_snapshotter: "native"
# containerd_runc_runtime:
# name: runc
# type: "io.containerd.runc.v2"
# engine: ""
# root: ""
# containerd_additional_runtimes:
# Example for Kata Containers as additional runtime:
# - name: kata
# type: "io.containerd.kata.v2"
# engine: ""
# root: ""
# containerd_grpc_max_recv_message_size: 16777216
# containerd_grpc_max_send_message_size: 16777216
# containerd_debug_level: "info"
# containerd_metrics_address: ""
# containerd_metrics_grpc_histogram: false
## An obvious use case is allowing insecure-registry access to self hosted registries.
## Can be ipaddress and domain_name.
## example define mirror.registry.io or 172.19.16.11:5000
## set "name": "url". insecure url must be started http://
## Port number is also needed if the default HTTPS port is not used.
# containerd_insecure_registries:
# "localhost": "http://127.0.0.1"
# "172.19.16.11:5000": "http://172.19.16.11:5000"
# containerd_registries:
# "docker.io": "https://registry-1.docker.io"
# containerd_max_container_log_line_size: -1
# containerd_registry_auth:
# - registry: 10.0.0.2:5000
# username: user
# password: pass


@ -1,2 +0,0 @@
## Does coreos need auto upgrade, default is true
# coreos_auto_upgrade: true


@ -1,6 +0,0 @@
# crio_insecure_registries:
# - 10.0.0.2:5000
# crio_registry_auth:
# - registry: 10.0.0.2:5000
# username: user
# password: pass


@ -1,59 +0,0 @@
---
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
## Please note that overlay2 is only supported on newer kernels
# docker_storage_options: -s overlay2
## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7.
docker_container_storage_setup: false
## A disk path must be defined for docker_container_storage_setup_devs.
## Otherwise docker-storage-setup will be executed incorrectly.
# docker_container_storage_setup_devs: /dev/vdb
## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver)
## Valid options are systemd or cgroupfs, default is systemd
# docker_cgroup_driver: systemd
## Only set this if you have more than 3 nameservers:
## If true Kubespray will only use the first 3, otherwise it will fail
docker_dns_servers_strict: false
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
## Used to set docker daemon iptables options to true
docker_iptables_enabled: "false"
# Docker log options
# Rotate container stderr/stdout logs at 50m and keep last 5
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
# define docker bin_dir
docker_bin_dir: "/usr/bin"
# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1'
# kubespray deletes the docker package on each run, so caching the package makes sense
docker_rpm_keepcache: 1
## An obvious use case is allowing insecure-registry access to self hosted registries.
## Can be ipaddress and domain_name.
## example define 172.19.16.11 or mirror.registry.io
# docker_insecure_registries:
# - mirror.registry.io
# - 172.19.16.11
## Add other registry,example China registry mirror.
# docker_registry_mirrors:
# - https://registry.docker-cn.com
# - https://mirror.aliyuncs.com
## If non-empty will override default system MountFlags value.
## This option takes a mount propagation flag: shared, slave
## or private, which control whether mounts in the file system
## namespace set up for docker will receive or propagate mounts
## and unmounts. Leave empty for system default
# docker_mount_flags:
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
# docker_options: ""


@ -1,16 +0,0 @@
---
## Directory where etcd data stored
etcd_data_dir: /var/lib/etcd
## Container runtime
## docker for docker, crio for cri-o and containerd for containerd.
## Additionally you can set this to kubeadm if you want to install etcd using kubeadm
## Kubeadm etcd deployment is experimental and only available for new deployments
## If this is not set, container manager will be inherited from the Kubespray defaults
## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want.
## This also makes it possible to use a different container manager for the etcd nodes.
# container_manager: containerd
## Settings for etcd deployment type
# Set this to docker if you are using container_manager: docker
etcd_deployment_type: host


@ -1,10 +0,0 @@
## GCP compute Persistent Disk CSI Driver credentials and parameters
## See docs/gcp-pd-csi.md for information about the implementation
## Specify the path to the file containing the service account credentials
# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json"
## To enable GCP Persistent Disk CSI driver, uncomment below
# gcp_pd_csi_enabled: true
# gcp_pd_csi_controller_replicas: 1
# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0"


@ -1,14 +0,0 @@
## Values for the external Hcloud Cloud Controller
# external_hcloud_cloud:
# hcloud_api_token: ""
# token_secret_name: hcloud
# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support
# service_account_name: cloud-controller-manager
#
# controller_image_tag: "latest"
# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset
# ## Format:
# ## external_hcloud_cloud.controller_extra_args:
# ## arg1: "value1"
# ## arg2: "value2"
# controller_extra_args: {}


@ -1,28 +0,0 @@
## When Oracle Cloud Infrastructure is used, set these variables
# oci_private_key:
# oci_region_id:
# oci_tenancy_id:
# oci_user_id:
# oci_user_fingerprint:
# oci_compartment_id:
# oci_vnc_id:
# oci_subnet1_id:
# oci_subnet2_id:
## Override these default/optional behaviors if you wish
# oci_security_list_management: All
## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples.
# oci_security_lists:
# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
# oci_use_instance_principals: false
# oci_cloud_controller_version: 0.6.0
## If you would like to control OCI query rate limits for the controller
# oci_rate_limit:
# rate_limit_qps_read:
# rate_limit_qps_write:
# rate_limit_bucket_read:
# rate_limit_bucket_write:
## Other optional variables
# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci)
# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above)

View file

@ -1,103 +0,0 @@
---
## Global Offline settings
### Private Container Image Registry
# registry_host: "myprivateregisry.com"
# files_repo: "http://myprivatehttpd"
### If using CentOS, RedHat, AlmaLinux or Fedora
# yum_repo: "http://myinternalyumrepo"
### If using Debian
# debian_repo: "http://myinternaldebianrepo"
### If using Ubuntu
# ubuntu_repo: "http://myinternalubunturepo"
## Container Registry overrides
# kube_image_repo: "{{ registry_host }}"
# gcr_image_repo: "{{ registry_host }}"
# github_image_repo: "{{ registry_host }}"
# docker_image_repo: "{{ registry_host }}"
# quay_image_repo: "{{ registry_host }}"
## Kubernetes components
# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm"
# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"
## CNI Plugins
# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
## cri-tools
# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
## [Optional] etcd: only if you **DON'T** use etcd_deployment=host
# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
# [Optional] Calico: If using Calico network plugin
# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore
# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz"
# [Optional] Cilium: If using Cilium network plugin
# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz"
# [Optional] Flannel: If using Flannel network plugin
# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}"
# [Optional] helm: only if you set helm_enabled: true
# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"
# [Optional] crun: only if you set crun_enabled: true
# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"
# [Optional] kata: only if you set kata_containers_enabled: true
# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz"
# [Optional] cri-dockerd: only if you set container_manager: docker
# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz"
# [Optional] cri-o: only if you set container_manager: crio
# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"
# [Optional] runc,containerd: only if you set container_runtime: containerd
# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}"
# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true
# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc"
# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1"
## CentOS/Redhat/AlmaLinux
### For EL7, base and extras repo must be available, for EL8, baseos and appstream
### By default we enable those repo automatically
# rhel_enable_repos: false
### Docker / Containerd
# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
## Fedora
### Docker
# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}"
# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
### Containerd
# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd"
# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
## Debian
### Docker
# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
### Containerd
# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd"
# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg"
# containerd_debian_repo_repokey: 'YOURREPOKEY'
## Ubuntu
### Docker
# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce"
# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg"
### Containerd
# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd"
# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
# containerd_ubuntu_repo_repokey: 'YOURREPOKEY'


@ -1,49 +0,0 @@
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
# openstack_blockstorage_version: "v1/v2/auto (default)"
# openstack_blockstorage_ignore_volume_az: yes
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
# openstack_lbaas_enabled: True
# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
## To enable automatic floating ip provisioning, specify a subnet.
# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
## Override default LBaaS behavior
# openstack_lbaas_use_octavia: False
# openstack_lbaas_method: "ROUND_ROBIN"
# openstack_lbaas_provider: "haproxy"
# openstack_lbaas_create_monitor: "yes"
# openstack_lbaas_monitor_delay: "1m"
# openstack_lbaas_monitor_timeout: "30s"
# openstack_lbaas_monitor_max_retries: "3"
## Values for the external OpenStack Cloud Controller
# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP"
# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP"
# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from"
# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from"
# external_openstack_lbaas_method: "ROUND_ROBIN"
# external_openstack_lbaas_provider: "octavia"
# external_openstack_lbaas_create_monitor: false
# external_openstack_lbaas_monitor_delay: "1m"
# external_openstack_lbaas_monitor_timeout: "30s"
# external_openstack_lbaas_monitor_max_retries: "3"
# external_openstack_lbaas_manage_security_groups: false
# external_openstack_lbaas_internal_lb: false
# external_openstack_network_ipv6_disabled: false
# external_openstack_network_internal_networks: []
# external_openstack_network_public_networks: []
# external_openstack_metadata_search_order: "configDrive,metadataService"
## Application credentials to authenticate against Keystone API
## Those settings will take precedence over username and password that might be set your environment
## All of them are required
# external_openstack_application_credential_name:
# external_openstack_application_credential_id:
# external_openstack_application_credential_secret:
## The tag of the external OpenStack Cloud Controller image
# external_openstack_cloud_controller_image_tag: "latest"
## To use Cinder CSI plugin to provision volumes set this value to true
## Make sure to source in the openstack credentials
# cinder_csi_enabled: true
# cinder_csi_controller_replicas: 1


@ -1,24 +0,0 @@
## Repo for UpClouds csi-driver: https://github.com/UpCloudLtd/upcloud-csi
## To use UpClouds CSI plugin to provision volumes set this value to true
## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD
# upcloud_csi_enabled: true
# upcloud_csi_controller_replicas: 1
## Override used image tags
# upcloud_csi_provisioner_image_tag: "v3.1.0"
# upcloud_csi_attacher_image_tag: "v3.4.0"
# upcloud_csi_resizer_image_tag: "v1.4.0"
# upcloud_csi_plugin_image_tag: "v0.3.3"
# upcloud_csi_node_image_tag: "v2.5.0"
# upcloud_tolerations: []
## Storage class options
# storage_classes:
# - name: standard
# is_default: true
# expand_persistent_volumes: true
# parameters:
# tier: maxiops
# - name: hdd
# is_default: false
# expand_persistent_volumes: true
# parameters:
# tier: hdd


@ -1,32 +0,0 @@
## Values for the external vSphere Cloud Provider
# external_vsphere_vcenter_ip: "myvcenter.domain.com"
# external_vsphere_vcenter_port: "443"
# external_vsphere_insecure: "true"
# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable
# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable
# external_vsphere_datacenter: "DATACENTER_name"
# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"
## Vsphere version where located VMs
# external_vsphere_version: "6.7u3"
## Tags for the external vSphere Cloud Provider images
## gcr.io/cloud-provider-vsphere/cpi/release/manager
# external_vsphere_cloud_controller_image_tag: "latest"
## gcr.io/cloud-provider-vsphere/csi/release/syncer
# vsphere_syncer_image_tag: "v2.5.1"
## registry.k8s.io/sig-storage/csi-attacher
# vsphere_csi_attacher_image_tag: "v3.4.0"
## gcr.io/cloud-provider-vsphere/csi/release/driver
# vsphere_csi_controller: "v2.5.1"
## registry.k8s.io/sig-storage/livenessprobe
# vsphere_csi_liveness_probe_image_tag: "v2.6.0"
## registry.k8s.io/sig-storage/csi-provisioner
# vsphere_csi_provisioner_image_tag: "v3.1.0"
## registry.k8s.io/sig-storage/csi-resizer
## makes sense only for vSphere version >=7.0
# vsphere_csi_resizer_tag: "v1.3.0"
## To use vSphere CSI plugin to provision volumes set this value to true
# vsphere_csi_enabled: true
# vsphere_csi_controller_replicas: 1

Some files were not shown because too many files have changed in this diff.