Compare commits
235 commits
pre-commit
...
master
Author | SHA1 | Date | |
---|---|---|---|
96f5d1ca87 | |||
7cb7887234 | |||
4728739597 | |||
fc0d58ff48 | |||
491e260d20 | |||
a132733b2d | |||
b377dbb96f | |||
c4d753c931 | |||
ee3b7c5da5 | |||
dcc267f6f4 | |||
ccf60fc9ca | |||
a38a3e7ddf | |||
beb4aa52ea | |||
f7d0fb9ab2 | |||
ff331f4eba | |||
94eae6a8dc | |||
f8d6b54dbb | |||
67c4f2d95e | |||
03fefa8933 | |||
c8ec77a734 | |||
4f32f94a51 | |||
3dc384a17a | |||
f1d0d1a9fe | |||
c036a7d871 | |||
6e63f3d2b4 | |||
09748e80e9 | |||
44a4f356ba | |||
a0f41bf82a | |||
5ae3e2818b | |||
1a0b81ac64 | |||
20d99886ca | |||
b9fe301036 | |||
b5844018f2 | |||
30508502d3 | |||
bca601d377 | |||
65191375b8 | |||
a534eb45ce | |||
e796f08184 | |||
ed38d8d3a1 | |||
07ad5ecfce | |||
4db5e663c3 | |||
529faeea9e | |||
47510899c7 | |||
4cd949c7e1 | |||
31d7e64073 | |||
7c1ee142dd | |||
25e86c5ca9 | |||
c41dd92007 | |||
a564d89d46 | |||
6c6a6e85da | |||
ed0acd8027 | |||
b9a690463d | |||
cbf4586c4c | |||
c3986957c4 | |||
8795cf6494 | |||
80af8a5e79 | |||
b60f65c1e8 | |||
943107115a | |||
ddbe9956e4 | |||
fdbcce3a5e | |||
f007c77641 | |||
9439487219 | |||
df6da52195 | |||
6ca89c80af | |||
7fe0b87d83 | |||
8a654b6955 | |||
5a8cf824f6 | |||
5c25b57989 | |||
5d1fe64bc8 | |||
a731e25778 | |||
0d6dc08578 | |||
40261fdf14 | |||
590b4aa240 | |||
2a696ddb34 | |||
d7f08d1b0c | |||
4aa1ef28ea | |||
58faef6ff6 | |||
34a52a7028 | |||
ce751cb89d | |||
5cf2883444 | |||
6bff338bad | |||
c78862052c | |||
1f54cef71c | |||
d00508105b | |||
c272421910 | |||
78624c5bcb | |||
c681435432 | |||
4d3f637684 | |||
5e14398af4 | |||
990f87acc8 | |||
eeb376460d | |||
ef707b3461 | |||
2af918132e | |||
b9b654714e | |||
fe399e0e0c | |||
b192053e28 | |||
a84271aa7e | |||
1901b512d2 | |||
9fdda7eca8 | |||
a68ed897f0 | |||
582ff96d19 | |||
0374a55eb3 | |||
ccbe38f78c | |||
958840da89 | |||
1530411218 | |||
e5ec0f18c0 | |||
0f44e8c812 | |||
1cc0f3c8c9 | |||
d9c39c274e | |||
c38fb866b7 | |||
5ad1d9db5e | |||
32f3d92d6b | |||
72b45eec2e | |||
23716b0eff | |||
859df84b45 | |||
131bd933a6 | |||
52904ee6ad | |||
e3339fe3d8 | |||
547ef747da | |||
63b27ea067 | |||
bc5881b70a | |||
f4b95d42a6 | |||
ef76a578a4 | |||
3b99d24ceb | |||
4701abff4c | |||
717b8daafe | |||
c346e46022 | |||
24632ae81b | |||
befde271eb | |||
d689f57c94 | |||
ad3f503c0c | |||
ae6c780af6 | |||
8b9cd3959a | |||
dffeab320e | |||
999586a110 | |||
f8d5487f8e | |||
4189008245 | |||
44115d7d7a | |||
841e2f44c0 | |||
a8e4984cf7 | |||
49196c2ec4 | |||
3646dc0bd2 | |||
694de1d67b | |||
31caab5f92 | |||
472996c8b3 | |||
d62c67a5f5 | |||
e486151aea | |||
9c407e667d | |||
18efdc2c51 | |||
6dff39344b | |||
c4de3df492 | |||
f2e11f088b | |||
782f0511b9 | |||
fa093ee609 | |||
612bcc4bb8 | |||
4ad67acedd | |||
467dc19cbd | |||
726711513f | |||
9468642269 | |||
d387d4811f | |||
1b3c2dab2e | |||
76573bf293 | |||
5d3326b93f | |||
68dac4e181 | |||
262c96ec0b | |||
2acdc33aa1 | |||
8acd33d0df | |||
a2e23c1a71 | |||
1b5cc175b9 | |||
a71da25b57 | |||
5ac614f97d | |||
b8b8b82ff4 | |||
7da3dbcb39 | |||
680293e79c | |||
023b16349e | |||
c4976437a8 | |||
97ca2f3c78 | |||
e76385e7cd | |||
7c2fb227f4 | |||
08bfa0b18f | |||
952cad8d63 | |||
5bce39abf8 | |||
fc57c0b27e | |||
dd4bc5fbfe | |||
d2a7434c67 | |||
5fa885b150 | |||
f3fb758f0c | |||
6386ec029c | |||
ad7cefa352 | |||
09d9bc910e | |||
e2f1f8d69d | |||
be2bfd867c | |||
133a7a0e1b | |||
efb47edb9f | |||
36bec19a84 | |||
6db6c8678c | |||
5603f9f374 | |||
7ebb8c3f2e | |||
acb6f243fd | |||
220f149299 | |||
1baabb3c05 | |||
617b17ad46 | |||
8af86e4c1e | |||
9dc9a670a5 | |||
b46ddf35fc | |||
de762400ad | |||
e60ece2b5e | |||
e6976a54e1 | |||
64daaf1887 | |||
1c75ec9ec1 | |||
c8a61ec98c | |||
aeeae76750 | |||
30b062fd43 | |||
8f899a1101 | |||
386c739d5b | |||
fddff783c8 | |||
bbd1161147 | |||
ab938602a9 | |||
e31890806c | |||
30c77ea4c1 | |||
175cdba9b1 | |||
ea29cd0890 | |||
68653c31c0 | |||
be5fdab3aa | |||
f4daf5856e | |||
49d869f662 | |||
b36bb9115a | |||
9ad2d24ad8 | |||
0088fe0ab7 | |||
ab93b17a7e | |||
9f1b980844 | |||
86d05ac180 | |||
bf6fcf6347 | |||
b9e4e27195 | |||
8585134db4 |
3
.gitignore
vendored
3
.gitignore
vendored
|
@ -20,11 +20,13 @@ contrib/terraform/aws/credentials.tfvars
|
||||||
*~
|
*~
|
||||||
vagrant/
|
vagrant/
|
||||||
plugins/mitogen
|
plugins/mitogen
|
||||||
|
deploy.sh
|
||||||
|
|
||||||
# Ansible inventory
|
# Ansible inventory
|
||||||
inventory/*
|
inventory/*
|
||||||
!inventory/local
|
!inventory/local
|
||||||
!inventory/sample
|
!inventory/sample
|
||||||
|
!inventory/c12s-sample
|
||||||
inventory/*/artifacts/
|
inventory/*/artifacts/
|
||||||
|
|
||||||
# Byte-compiled / optimized / DLL files
|
# Byte-compiled / optimized / DLL files
|
||||||
|
@ -112,3 +114,4 @@ roles/**/molecule/**/__pycache__/
|
||||||
|
|
||||||
# Temp location used by our scripts
|
# Temp location used by our scripts
|
||||||
scripts/tmp/
|
scripts/tmp/
|
||||||
|
tmp.md
|
||||||
|
|
|
@ -8,7 +8,7 @@ stages:
|
||||||
- deploy-special
|
- deploy-special
|
||||||
|
|
||||||
variables:
|
variables:
|
||||||
KUBESPRAY_VERSION: v2.19.0
|
KUBESPRAY_VERSION: v2.20.0
|
||||||
FAILFASTCI_NAMESPACE: 'kargo-ci'
|
FAILFASTCI_NAMESPACE: 'kargo-ci'
|
||||||
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
|
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
|
||||||
ANSIBLE_FORCE_COLOR: "true"
|
ANSIBLE_FORCE_COLOR: "true"
|
||||||
|
|
|
@ -75,6 +75,13 @@ check-readme-versions:
|
||||||
script:
|
script:
|
||||||
- tests/scripts/check_readme_versions.sh
|
- tests/scripts/check_readme_versions.sh
|
||||||
|
|
||||||
|
check-typo:
|
||||||
|
stage: unit-tests
|
||||||
|
tags: [light]
|
||||||
|
image: python:3
|
||||||
|
script:
|
||||||
|
- tests/scripts/check_typo.sh
|
||||||
|
|
||||||
ci-matrix:
|
ci-matrix:
|
||||||
stage: unit-tests
|
stage: unit-tests
|
||||||
tags: [light]
|
tags: [light]
|
||||||
|
|
|
@ -51,6 +51,11 @@ packet_ubuntu20-aio-docker:
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
|
packet_ubuntu20-calico-aio-hardening:
|
||||||
|
stage: deploy-part2
|
||||||
|
extends: .packet_pr
|
||||||
|
when: on_success
|
||||||
|
|
||||||
packet_ubuntu18-calico-aio:
|
packet_ubuntu18-calico-aio:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
|
@ -151,6 +156,11 @@ packet_rockylinux8-calico:
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
|
packet_rockylinux9-calico:
|
||||||
|
stage: deploy-part2
|
||||||
|
extends: .packet_pr
|
||||||
|
when: on_success
|
||||||
|
|
||||||
packet_almalinux8-docker:
|
packet_almalinux8-docker:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
|
|
|
@ -43,6 +43,7 @@ vagrant_ubuntu20-flannel:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .vagrant
|
extends: .vagrant
|
||||||
when: on_success
|
when: on_success
|
||||||
|
allow_failure: false
|
||||||
|
|
||||||
vagrant_ubuntu16-kube-router-sep:
|
vagrant_ubuntu16-kube-router-sep:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
|
|
|
@ -1,2 +1,3 @@
|
||||||
---
|
---
|
||||||
MD013: false
|
MD013: false
|
||||||
|
MD029: false
|
||||||
|
|
48
.pre-commit-config.yaml
Normal file
48
.pre-commit-config.yaml
Normal file
|
@ -0,0 +1,48 @@
|
||||||
|
---
|
||||||
|
repos:
|
||||||
|
- repo: https://github.com/adrienverge/yamllint.git
|
||||||
|
rev: v1.27.1
|
||||||
|
hooks:
|
||||||
|
- id: yamllint
|
||||||
|
args: [--strict]
|
||||||
|
|
||||||
|
- repo: https://github.com/markdownlint/markdownlint
|
||||||
|
rev: v0.11.0
|
||||||
|
hooks:
|
||||||
|
- id: markdownlint
|
||||||
|
args: [ -r, "~MD013,~MD029" ]
|
||||||
|
exclude: "^.git"
|
||||||
|
|
||||||
|
- repo: local
|
||||||
|
hooks:
|
||||||
|
- id: ansible-lint
|
||||||
|
name: ansible-lint
|
||||||
|
entry: ansible-lint -v
|
||||||
|
language: python
|
||||||
|
pass_filenames: false
|
||||||
|
additional_dependencies:
|
||||||
|
- .[community]
|
||||||
|
|
||||||
|
- id: ansible-syntax-check
|
||||||
|
name: ansible-syntax-check
|
||||||
|
entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
|
||||||
|
language: python
|
||||||
|
files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"
|
||||||
|
|
||||||
|
- id: tox-inventory-builder
|
||||||
|
name: tox-inventory-builder
|
||||||
|
entry: bash -c "cd contrib/inventory_builder && tox"
|
||||||
|
language: python
|
||||||
|
pass_filenames: false
|
||||||
|
|
||||||
|
- id: check-readme-versions
|
||||||
|
name: check-readme-versions
|
||||||
|
entry: tests/scripts/check_readme_versions.sh
|
||||||
|
language: script
|
||||||
|
pass_filenames: false
|
||||||
|
|
||||||
|
- id: ci-matrix
|
||||||
|
name: ci-matrix
|
||||||
|
entry: tests/scripts/md-table/test.sh
|
||||||
|
language: script
|
||||||
|
pass_filenames: false
|
|
@ -16,7 +16,12 @@ pip install -r tests/requirements.txt
|
||||||
|
|
||||||
#### Linting
|
#### Linting
|
||||||
|
|
||||||
Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint`. It is a good idea to add call these tools as part of your pre-commit hook and avoid a lot of back end forth on fixing linting issues (<https://support.gitkraken.com/working-with-repositories/githooksexample/>).
|
Kubespray uses [pre-commit](https://pre-commit.com) hook configuration to run several linters, please install this tool and use it to run validation tests before submitting a PR.
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
pre-commit install
|
||||||
|
pre-commit run -a # To run pre-commit hook on all files in the repository, even if they were not modified
|
||||||
|
```
|
||||||
|
|
||||||
#### Molecule
|
#### Molecule
|
||||||
|
|
||||||
|
@ -33,7 +38,9 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster
|
||||||
1. Submit an issue describing your proposed change to the repo in question.
|
1. Submit an issue describing your proposed change to the repo in question.
|
||||||
2. The [repo owners](OWNERS) will respond to your issue promptly.
|
2. The [repo owners](OWNERS) will respond to your issue promptly.
|
||||||
3. Fork the desired repo, develop and test your code changes.
|
3. Fork the desired repo, develop and test your code changes.
|
||||||
4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
|
4. Install [pre-commit](https://pre-commit.com) and install it in your development repo.
|
||||||
5. Submit a pull request.
|
5. Addess any pre-commit validation failures.
|
||||||
6. Work with the reviewers on their suggestions.
|
6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
|
||||||
7. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before final merger of your contribution.
|
7. Submit a pull request.
|
||||||
|
8. Work with the reviewers on their suggestions.
|
||||||
|
9. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before final merger of your contribution.
|
||||||
|
|
|
@ -8,6 +8,8 @@ aliases:
|
||||||
- floryut
|
- floryut
|
||||||
- oomichi
|
- oomichi
|
||||||
- cristicalin
|
- cristicalin
|
||||||
|
- liupeng0518
|
||||||
|
- yankay
|
||||||
kubespray-reviewers:
|
kubespray-reviewers:
|
||||||
- holmsten
|
- holmsten
|
||||||
- bozzo
|
- bozzo
|
||||||
|
@ -16,6 +18,7 @@ aliases:
|
||||||
- jayonlau
|
- jayonlau
|
||||||
- cristicalin
|
- cristicalin
|
||||||
- liupeng0518
|
- liupeng0518
|
||||||
|
- yankay
|
||||||
kubespray-emeritus_approvers:
|
kubespray-emeritus_approvers:
|
||||||
- riverzhang
|
- riverzhang
|
||||||
- atoms
|
- atoms
|
||||||
|
|
47
README.md
47
README.md
|
@ -57,10 +57,11 @@ A simple way to ensure you get all the correct version of Ansible is to use the
|
||||||
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
|
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
docker pull quay.io/kubespray/kubespray:v2.19.0
|
git checkout v2.20.0
|
||||||
|
docker pull quay.io/kubespray/kubespray:v2.20.0
|
||||||
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
|
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
|
||||||
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
|
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
|
||||||
quay.io/kubespray/kubespray:v2.19.0 bash
|
quay.io/kubespray/kubespray:v2.20.0 bash
|
||||||
# Inside the container you may now run the kubespray playbooks:
|
# Inside the container you may now run the kubespray playbooks:
|
||||||
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
|
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
|
||||||
```
|
```
|
||||||
|
@ -113,6 +114,7 @@ vagrant up
|
||||||
- [Air-Gap installation](docs/offline-environment.md)
|
- [Air-Gap installation](docs/offline-environment.md)
|
||||||
- [NTP](docs/ntp.md)
|
- [NTP](docs/ntp.md)
|
||||||
- [Hardening](docs/hardening.md)
|
- [Hardening](docs/hardening.md)
|
||||||
|
- [Mirror](docs/mirror.md)
|
||||||
- [Roadmap](docs/roadmap.md)
|
- [Roadmap](docs/roadmap.md)
|
||||||
|
|
||||||
## Supported Linux Distributions
|
## Supported Linux Distributions
|
||||||
|
@ -120,44 +122,46 @@ vagrant up
|
||||||
- **Flatcar Container Linux by Kinvolk**
|
- **Flatcar Container Linux by Kinvolk**
|
||||||
- **Debian** Bullseye, Buster, Jessie, Stretch
|
- **Debian** Bullseye, Buster, Jessie, Stretch
|
||||||
- **Ubuntu** 16.04, 18.04, 20.04, 22.04
|
- **Ubuntu** 16.04, 18.04, 20.04, 22.04
|
||||||
- **CentOS/RHEL** 7, [8](docs/centos.md#centos-8)
|
- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
|
||||||
- **Fedora** 35, 36
|
- **Fedora** 35, 36
|
||||||
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
|
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
|
||||||
- **openSUSE** Leap 15.x/Tumbleweed
|
- **openSUSE** Leap 15.x/Tumbleweed
|
||||||
- **Oracle Linux** 7, [8](docs/centos.md#centos-8)
|
- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
|
||||||
- **Alma Linux** [8](docs/centos.md#centos-8)
|
- **Alma Linux** [8, 9](docs/centos.md#centos-8)
|
||||||
- **Rocky Linux** [8](docs/centos.md#centos-8)
|
- **Rocky Linux** [8, 9](docs/centos.md#centos-8)
|
||||||
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
|
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
|
||||||
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
|
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
|
||||||
|
- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md))
|
||||||
|
- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md))
|
||||||
|
|
||||||
Note: Upstart/SysV init based OS types are not supported.
|
Note: Upstart/SysV init based OS types are not supported.
|
||||||
|
|
||||||
## Supported Components
|
## Supported Components
|
||||||
|
|
||||||
- Core
|
- Core
|
||||||
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.24.3
|
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.5
|
||||||
- [etcd](https://github.com/etcd-io/etcd) v3.5.4
|
- [etcd](https://github.com/etcd-io/etcd) v3.5.6
|
||||||
- [docker](https://www.docker.com/) v20.10 (see note)
|
- [docker](https://www.docker.com/) v20.10 (see note)
|
||||||
- [containerd](https://containerd.io/) v1.6.6
|
- [containerd](https://containerd.io/) v1.6.14
|
||||||
- [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
- [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
||||||
- Network Plugin
|
- Network Plugin
|
||||||
- [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
|
- [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
|
||||||
- [calico](https://github.com/projectcalico/calico) v3.23.3
|
- [calico](https://github.com/projectcalico/calico) v3.24.5
|
||||||
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
|
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
|
||||||
- [cilium](https://github.com/cilium/cilium) v1.11.7
|
- [cilium](https://github.com/cilium/cilium) v1.12.1
|
||||||
- [flannel](https://github.com/flannel-io/flannel) v0.18.1
|
- [flannel](https://github.com/flannel-io/flannel) v0.19.2
|
||||||
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.7
|
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7
|
||||||
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
|
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
|
||||||
- [multus](https://github.com/intel/multus-cni) v3.8
|
- [multus](https://github.com/intel/multus-cni) v3.8
|
||||||
- [weave](https://github.com/weaveworks/weave) v2.8.1
|
- [weave](https://github.com/weaveworks/weave) v2.8.1
|
||||||
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.4.2
|
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.5
|
||||||
- Application
|
- Application
|
||||||
- [cert-manager](https://github.com/jetstack/cert-manager) v1.9.0
|
- [cert-manager](https://github.com/jetstack/cert-manager) v1.10.1
|
||||||
- [coredns](https://github.com/coredns/coredns) v1.8.6
|
- [coredns](https://github.com/coredns/coredns) v1.9.3
|
||||||
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.3.0
|
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.5.1
|
||||||
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
|
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
|
||||||
- [argocd](https://argoproj.github.io/) v2.4.7
|
- [argocd](https://argoproj.github.io/) v2.4.16
|
||||||
- [helm](https://helm.sh/) v3.9.2
|
- [helm](https://helm.sh/) v3.9.4
|
||||||
- [metallb](https://metallb.universe.tf/) v0.12.1
|
- [metallb](https://metallb.universe.tf/) v0.12.1
|
||||||
- [registry](https://github.com/distribution/distribution) v2.8.1
|
- [registry](https://github.com/distribution/distribution) v2.8.1
|
||||||
- Storage Plugin
|
- Storage Plugin
|
||||||
|
@ -168,7 +172,7 @@ Note: Upstart/SysV init based OS types are not supported.
|
||||||
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
|
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
|
||||||
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
|
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
|
||||||
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22
|
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22
|
||||||
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.4.0
|
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
|
||||||
|
|
||||||
## Container Runtime Notes
|
## Container Runtime Notes
|
||||||
|
|
||||||
|
@ -177,7 +181,7 @@ Note: Upstart/SysV init based OS types are not supported.
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
- **Minimum required version of Kubernetes is v1.22**
|
- **Minimum required version of Kubernetes is v1.23**
|
||||||
- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
|
- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
|
||||||
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
|
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
|
||||||
- The target servers are configured to allow **IPv4 forwarding**.
|
- The target servers are configured to allow **IPv4 forwarding**.
|
||||||
|
@ -246,6 +250,7 @@ See also [Network checker](docs/netcheck.md).
|
||||||
|
|
||||||
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
|
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
|
||||||
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
|
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
|
||||||
|
- [Kubean](https://github.com/kubean-io/kubean)
|
||||||
|
|
||||||
## CI Tests
|
## CI Tests
|
||||||
|
|
||||||
|
|
|
@ -9,5 +9,7 @@
|
||||||
#
|
#
|
||||||
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
|
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
|
||||||
# INSTRUCTIONS AT https://kubernetes.io/security/
|
# INSTRUCTIONS AT https://kubernetes.io/security/
|
||||||
atoms
|
|
||||||
mattymo
|
mattymo
|
||||||
|
floryut
|
||||||
|
oomichi
|
||||||
|
cristicalin
|
||||||
|
|
2
Vagrantfile
vendored
2
Vagrantfile
vendored
|
@ -31,7 +31,7 @@ SUPPORTED_OS = {
|
||||||
"rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
|
"rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
|
||||||
"fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
|
"fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
|
||||||
"fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
|
"fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
|
||||||
"opensuse" => {box: "opensuse/Leap-15.3.x86_64", user: "vagrant"},
|
"opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
|
||||||
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
||||||
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
||||||
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
|
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
|
||||||
|
|
|
@ -4,7 +4,7 @@
|
||||||
become: no
|
become: no
|
||||||
vars:
|
vars:
|
||||||
minimal_ansible_version: 2.11.0
|
minimal_ansible_version: 2.11.0
|
||||||
maximal_ansible_version: 2.13.0
|
maximal_ansible_version: 2.14.0
|
||||||
ansible_connection: local
|
ansible_connection: local
|
||||||
tags: always
|
tags: always
|
||||||
tasks:
|
tasks:
|
||||||
|
|
|
@ -35,7 +35,7 @@
|
||||||
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
|
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
|
||||||
- { role: download, tags: download, when: "not skip_downloads" }
|
- { role: download, tags: download, when: "not skip_downloads" }
|
||||||
|
|
||||||
- hosts: etcd
|
- hosts: etcd:kube_control_plane
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
environment: "{{ proxy_disable_env }}"
|
environment: "{{ proxy_disable_env }}"
|
||||||
|
@ -59,7 +59,10 @@
|
||||||
vars:
|
vars:
|
||||||
etcd_cluster_setup: false
|
etcd_cluster_setup: false
|
||||||
etcd_events_cluster_setup: false
|
etcd_events_cluster_setup: false
|
||||||
when: etcd_deployment_type != "kubeadm"
|
when:
|
||||||
|
- etcd_deployment_type != "kubeadm"
|
||||||
|
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
|
||||||
|
- kube_network_plugin != "calico" or calico_datastore == "etcd"
|
||||||
|
|
||||||
- hosts: k8s_cluster
|
- hosts: k8s_cluster
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
|
|
|
@ -13,7 +13,7 @@
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
import inventory
|
import inventory
|
||||||
from test import support
|
from io import StringIO
|
||||||
import unittest
|
import unittest
|
||||||
from unittest import mock
|
from unittest import mock
|
||||||
|
|
||||||
|
@ -41,7 +41,7 @@ class TestInventoryPrintHostnames(unittest.TestCase):
|
||||||
'access_ip': '10.90.0.3'}}}})
|
'access_ip': '10.90.0.3'}}}})
|
||||||
with mock.patch('builtins.open', mock_io):
|
with mock.patch('builtins.open', mock_io):
|
||||||
with self.assertRaises(SystemExit) as cm:
|
with self.assertRaises(SystemExit) as cm:
|
||||||
with support.captured_stdout() as stdout:
|
with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
|
||||||
inventory.KubesprayInventory(
|
inventory.KubesprayInventory(
|
||||||
changed_hosts=["print_hostnames"],
|
changed_hosts=["print_hostnames"],
|
||||||
config_file="file")
|
config_file="file")
|
||||||
|
|
|
@ -14,12 +14,16 @@ This role performs basic installation and setup of Gluster, but it does not conf
|
||||||
|
|
||||||
Available variables are listed below, along with default values (see `defaults/main.yml`):
|
Available variables are listed below, along with default values (see `defaults/main.yml`):
|
||||||
|
|
||||||
glusterfs_default_release: ""
|
```yaml
|
||||||
|
glusterfs_default_release: ""
|
||||||
|
```
|
||||||
|
|
||||||
You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
|
You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
|
||||||
|
|
||||||
glusterfs_ppa_use: yes
|
```yaml
|
||||||
glusterfs_ppa_version: "3.5"
|
glusterfs_ppa_use: yes
|
||||||
|
glusterfs_ppa_version: "3.5"
|
||||||
|
```
|
||||||
|
|
||||||
For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.
|
For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.
|
||||||
|
|
||||||
|
@ -29,9 +33,11 @@ None.
|
||||||
|
|
||||||
## Example Playbook
|
## Example Playbook
|
||||||
|
|
||||||
|
```yaml
|
||||||
- hosts: server
|
- hosts: server
|
||||||
roles:
|
roles:
|
||||||
- geerlingguy.glusterfs
|
- geerlingguy.glusterfs
|
||||||
|
```
|
||||||
|
|
||||||
For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).
|
For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).
|
||||||
|
|
||||||
|
|
|
@ -36,8 +36,7 @@ terraform apply -var-file=credentials.tfvars
|
||||||
```
|
```
|
||||||
|
|
||||||
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
|
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
|
||||||
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated ssh-bastion.conf.
|
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated `ssh-bastion.conf`. Ansible automatically detects bastion and changes `ssh_args`
|
||||||
Ansible automatically detects bastion and changes ssh_args
|
|
||||||
|
|
||||||
```commandline
|
```commandline
|
||||||
ssh -F ./ssh-bastion.conf user@$ip
|
ssh -F ./ssh-bastion.conf user@$ip
|
||||||
|
|
|
@ -31,9 +31,7 @@ The setup looks like following
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
* Terraform 0.13.0 or newer
|
* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)
|
||||||
|
|
||||||
*0.12 also works if you modify the provider block to include version and remove all `versions.tf` files*
|
|
||||||
|
|
||||||
## Quickstart
|
## Quickstart
|
||||||
|
|
||||||
|
|
|
@ -3,8 +3,8 @@ provider "exoscale" {}
|
||||||
module "kubernetes" {
|
module "kubernetes" {
|
||||||
source = "./modules/kubernetes-cluster"
|
source = "./modules/kubernetes-cluster"
|
||||||
|
|
||||||
prefix = var.prefix
|
prefix = var.prefix
|
||||||
|
zone = var.zone
|
||||||
machines = var.machines
|
machines = var.machines
|
||||||
|
|
||||||
ssh_public_keys = var.ssh_public_keys
|
ssh_public_keys = var.ssh_public_keys
|
||||||
|
|
|
@ -2,18 +2,18 @@
|
||||||
${connection_strings_master}
|
${connection_strings_master}
|
||||||
${connection_strings_worker}
|
${connection_strings_worker}
|
||||||
|
|
||||||
[kube-master]
|
[kube_control_plane]
|
||||||
${list_master}
|
${list_master}
|
||||||
|
|
||||||
[etcd]
|
[etcd]
|
||||||
${list_master}
|
${list_master}
|
||||||
|
|
||||||
[kube-node]
|
[kube_node]
|
||||||
${list_worker}
|
${list_worker}
|
||||||
|
|
||||||
[k8s-cluster:children]
|
[k8s_cluster:children]
|
||||||
kube-master
|
kube-master
|
||||||
kube-node
|
kube-node
|
||||||
|
|
||||||
[k8s-cluster:vars]
|
[k8s_cluster:vars]
|
||||||
network_id=${network_id}
|
network_id=${network_id}
|
||||||
|
|
|
@ -270,6 +270,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|
||||||
|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
|
|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
|
||||||
|`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
|
|`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
|
||||||
|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
|
|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
|
||||||
|
|`bastion_allowed_ports` | List of ports to open on bastion node, `[]` by default |
|
||||||
|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
|
|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
|
||||||
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|
||||||
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
|
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
|
||||||
|
@ -294,7 +295,8 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|
||||||
Allows a custom definition of worker nodes giving the operator full control over individual node flavor and
|
Allows a custom definition of worker nodes giving the operator full control over individual node flavor and
|
||||||
availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and
|
availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and
|
||||||
`number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration
|
`number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration
|
||||||
using the `k8s_nodes` variable.
|
using the `k8s_nodes` variable. The `az`, `flavor` and `floating_ip` parameters are mandatory.
|
||||||
|
The optional parameter `extra_groups` (a comma-delimited string) can be used to define extra inventory group memberships for specific nodes.
|
||||||
|
|
||||||
For example:
|
For example:
|
||||||
|
|
||||||
|
@ -314,6 +316,7 @@ k8s_nodes = {
|
||||||
"az" = "sto3"
|
"az" = "sto3"
|
||||||
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
|
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
|
||||||
"floating_ip" = true
|
"floating_ip" = true
|
||||||
|
"extra_groups" = "calico_rr"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
|
@ -84,6 +84,7 @@ module "compute" {
|
||||||
supplementary_node_groups = var.supplementary_node_groups
|
supplementary_node_groups = var.supplementary_node_groups
|
||||||
master_allowed_ports = var.master_allowed_ports
|
master_allowed_ports = var.master_allowed_ports
|
||||||
worker_allowed_ports = var.worker_allowed_ports
|
worker_allowed_ports = var.worker_allowed_ports
|
||||||
|
bastion_allowed_ports = var.bastion_allowed_ports
|
||||||
use_access_ip = var.use_access_ip
|
use_access_ip = var.use_access_ip
|
||||||
master_server_group_policy = var.master_server_group_policy
|
master_server_group_policy = var.master_server_group_policy
|
||||||
node_server_group_policy = var.node_server_group_policy
|
node_server_group_policy = var.node_server_group_policy
|
||||||
|
@ -96,6 +97,11 @@ module "compute" {
|
||||||
network_router_id = module.network.router_id
|
network_router_id = module.network.router_id
|
||||||
network_id = module.network.network_id
|
network_id = module.network.network_id
|
||||||
use_existing_network = var.use_existing_network
|
use_existing_network = var.use_existing_network
|
||||||
|
private_subnet_id = module.network.subnet_id
|
||||||
|
|
||||||
|
depends_on = [
|
||||||
|
module.network.subnet_id
|
||||||
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
output "private_subnet_id" {
|
output "private_subnet_id" {
|
||||||
|
@ -111,7 +117,7 @@ output "router_id" {
|
||||||
}
|
}
|
||||||
|
|
||||||
output "k8s_master_fips" {
|
output "k8s_master_fips" {
|
||||||
value = concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips)
|
value = var.number_of_k8s_masters + var.number_of_k8s_masters_no_etcd > 0 ? concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips) : [for key, value in module.ips.k8s_masters_fips : value.address]
|
||||||
}
|
}
|
||||||
|
|
||||||
output "k8s_node_fips" {
|
output "k8s_node_fips" {
|
||||||
|
|
|
@ -15,8 +15,11 @@ data "openstack_images_image_v2" "image_master" {
|
||||||
name = var.image_master == "" ? var.image : var.image_master
|
name = var.image_master == "" ? var.image : var.image_master
|
||||||
}
|
}
|
||||||
|
|
||||||
data "template_file" "cloudinit" {
|
data "cloudinit_config" "cloudinit" {
|
||||||
template = file("${path.module}/templates/cloudinit.yaml")
|
part {
|
||||||
|
content_type = "text/cloud-config"
|
||||||
|
content = file("${path.module}/templates/cloudinit.yaml")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
data "openstack_networking_network_v2" "k8s_network" {
|
data "openstack_networking_network_v2" "k8s_network" {
|
||||||
|
@ -82,6 +85,17 @@ resource "openstack_networking_secgroup_rule_v2" "bastion" {
|
||||||
security_group_id = openstack_networking_secgroup_v2.bastion[0].id
|
security_group_id = openstack_networking_secgroup_v2.bastion[0].id
|
||||||
}
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_secgroup_rule_v2" "k8s_bastion_ports" {
|
||||||
|
count = length(var.bastion_allowed_ports)
|
||||||
|
direction = "ingress"
|
||||||
|
ethertype = "IPv4"
|
||||||
|
protocol = lookup(var.bastion_allowed_ports[count.index], "protocol", "tcp")
|
||||||
|
port_range_min = lookup(var.bastion_allowed_ports[count.index], "port_range_min")
|
||||||
|
port_range_max = lookup(var.bastion_allowed_ports[count.index], "port_range_max")
|
||||||
|
remote_ip_prefix = lookup(var.bastion_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")
|
||||||
|
security_group_id = openstack_networking_secgroup_v2.bastion[0].id
|
||||||
|
}
|
||||||
|
|
||||||
resource "openstack_networking_secgroup_v2" "k8s" {
|
resource "openstack_networking_secgroup_v2" "k8s" {
|
||||||
name = "${var.cluster_name}-k8s"
|
name = "${var.cluster_name}-k8s"
|
||||||
description = "${var.cluster_name} - Kubernetes"
|
description = "${var.cluster_name} - Kubernetes"
|
||||||
|
@ -195,6 +209,9 @@ resource "openstack_networking_port_v2" "bastion_port" {
|
||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
fixed_ip {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
|
@ -207,7 +224,7 @@ resource "openstack_compute_instance_v2" "bastion" {
|
||||||
image_id = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
image_id = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||||
flavor_id = var.flavor_bastion
|
flavor_id = var.flavor_bastion
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||||
|
@ -245,6 +262,9 @@ resource "openstack_networking_port_v2" "k8s_master_port" {
|
||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
fixed_ip {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
|
@ -258,7 +278,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
||||||
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||||
flavor_id = var.flavor_k8s_master
|
flavor_id = var.flavor_k8s_master
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
|
@ -305,6 +325,9 @@ resource "openstack_networking_port_v2" "k8s_masters_port" {
|
||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
fixed_ip {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
|
@ -363,6 +386,9 @@ resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" {
|
||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
fixed_ip {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
|
@ -376,7 +402,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||||
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||||
flavor_id = var.flavor_k8s_master
|
flavor_id = var.flavor_k8s_master
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
|
@ -423,6 +449,9 @@ resource "openstack_networking_port_v2" "etcd_port" {
|
||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
fixed_ip {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
|
@ -436,7 +465,7 @@ resource "openstack_compute_instance_v2" "etcd" {
|
||||||
image_id = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
image_id = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||||
flavor_id = var.flavor_etcd
|
flavor_id = var.flavor_etcd
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
|
for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
|
||||||
|
@ -477,6 +506,9 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" {
|
||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
fixed_ip {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
|
@ -531,6 +563,9 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port"
|
||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
fixed_ip {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
|
@ -544,7 +579,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||||
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||||
flavor_id = var.flavor_k8s_master
|
flavor_id = var.flavor_k8s_master
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
|
for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
|
||||||
|
@ -586,6 +621,9 @@ resource "openstack_networking_port_v2" "k8s_node_port" {
|
||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
fixed_ip {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
|
@ -599,7 +637,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
||||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||||
flavor_id = var.flavor_k8s_node
|
flavor_id = var.flavor_k8s_node
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||||
|
@ -646,6 +684,9 @@ resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" {
|
||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
fixed_ip {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
|
@ -659,7 +700,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||||
flavor_id = var.flavor_k8s_node
|
flavor_id = var.flavor_k8s_node
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||||
|
@ -701,6 +742,9 @@ resource "openstack_networking_port_v2" "k8s_nodes_port" {
|
||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
fixed_ip {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
|
@ -714,7 +758,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||||
flavor_id = each.value.flavor
|
flavor_id = each.value.flavor
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||||
|
@ -742,7 +786,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||||
|
|
||||||
metadata = {
|
metadata = {
|
||||||
ssh_user = var.ssh_user
|
ssh_user = var.ssh_user
|
||||||
kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
|
kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups},${try(each.value.extra_groups, "")}"
|
||||||
depends_on = var.network_router_id
|
depends_on = var.network_router_id
|
||||||
use_access_ip = var.use_access_ip
|
use_access_ip = var.use_access_ip
|
||||||
}
|
}
|
||||||
|
@ -760,6 +804,9 @@ resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" {
|
||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
fixed_ip {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
|
|
|
@ -136,6 +136,10 @@ variable "worker_allowed_ports" {
|
||||||
type = list
|
type = list
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "bastion_allowed_ports" {
|
||||||
|
type = list
|
||||||
|
}
|
||||||
|
|
||||||
variable "use_access_ip" {}
|
variable "use_access_ip" {}
|
||||||
|
|
||||||
variable "master_server_group_policy" {
|
variable "master_server_group_policy" {
|
||||||
|
@ -185,3 +189,7 @@ variable "port_security_enabled" {
|
||||||
variable "force_null_port_security" {
|
variable "force_null_port_security" {
|
||||||
type = bool
|
type = bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "private_subnet_id" {
|
||||||
|
type = string
|
||||||
|
}
|
||||||
|
|
|
@ -257,6 +257,12 @@ variable "worker_allowed_ports" {
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "bastion_allowed_ports" {
|
||||||
|
type = list(any)
|
||||||
|
|
||||||
|
default = []
|
||||||
|
}
|
||||||
|
|
||||||
variable "use_access_ip" {
|
variable "use_access_ip" {
|
||||||
default = 1
|
default = 1
|
||||||
}
|
}
|
||||||
|
|
|
@ -251,8 +251,8 @@ resource "upcloud_firewall_rules" "master" {
|
||||||
content {
|
content {
|
||||||
action = "accept"
|
action = "accept"
|
||||||
comment = "UpCloud DNS"
|
comment = "UpCloud DNS"
|
||||||
destination_port_end = "53"
|
source_port_end = "53"
|
||||||
destination_port_start = "53"
|
source_port_start = "53"
|
||||||
direction = "in"
|
direction = "in"
|
||||||
family = "IPv4"
|
family = "IPv4"
|
||||||
protocol = firewall_rule.value
|
protocol = firewall_rule.value
|
||||||
|
@ -267,8 +267,8 @@ resource "upcloud_firewall_rules" "master" {
|
||||||
content {
|
content {
|
||||||
action = "accept"
|
action = "accept"
|
||||||
comment = "UpCloud DNS"
|
comment = "UpCloud DNS"
|
||||||
destination_port_end = "53"
|
source_port_end = "53"
|
||||||
destination_port_start = "53"
|
source_port_start = "53"
|
||||||
direction = "in"
|
direction = "in"
|
||||||
family = "IPv4"
|
family = "IPv4"
|
||||||
protocol = firewall_rule.value
|
protocol = firewall_rule.value
|
||||||
|
@ -283,8 +283,8 @@ resource "upcloud_firewall_rules" "master" {
|
||||||
content {
|
content {
|
||||||
action = "accept"
|
action = "accept"
|
||||||
comment = "UpCloud DNS"
|
comment = "UpCloud DNS"
|
||||||
destination_port_end = "53"
|
source_port_end = "53"
|
||||||
destination_port_start = "53"
|
source_port_start = "53"
|
||||||
direction = "in"
|
direction = "in"
|
||||||
family = "IPv6"
|
family = "IPv6"
|
||||||
protocol = firewall_rule.value
|
protocol = firewall_rule.value
|
||||||
|
@ -299,8 +299,8 @@ resource "upcloud_firewall_rules" "master" {
|
||||||
content {
|
content {
|
||||||
action = "accept"
|
action = "accept"
|
||||||
comment = "UpCloud DNS"
|
comment = "UpCloud DNS"
|
||||||
destination_port_end = "53"
|
source_port_end = "53"
|
||||||
destination_port_start = "53"
|
source_port_start = "53"
|
||||||
direction = "in"
|
direction = "in"
|
||||||
family = "IPv6"
|
family = "IPv6"
|
||||||
protocol = firewall_rule.value
|
protocol = firewall_rule.value
|
||||||
|
@ -315,8 +315,8 @@ resource "upcloud_firewall_rules" "master" {
|
||||||
content {
|
content {
|
||||||
action = "accept"
|
action = "accept"
|
||||||
comment = "NTP Port"
|
comment = "NTP Port"
|
||||||
destination_port_end = "123"
|
source_port_end = "123"
|
||||||
destination_port_start = "123"
|
source_port_start = "123"
|
||||||
direction = "in"
|
direction = "in"
|
||||||
family = "IPv4"
|
family = "IPv4"
|
||||||
protocol = firewall_rule.value
|
protocol = firewall_rule.value
|
||||||
|
@ -325,6 +325,20 @@ resource "upcloud_firewall_rules" "master" {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.firewall_default_deny_in ? ["udp"] : []
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "NTP Port"
|
||||||
|
source_port_end = "123"
|
||||||
|
source_port_start = "123"
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv6"
|
||||||
|
protocol = firewall_rule.value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
firewall_rule {
|
firewall_rule {
|
||||||
action = var.firewall_default_deny_in ? "drop" : "accept"
|
action = var.firewall_default_deny_in ? "drop" : "accept"
|
||||||
direction = "in"
|
direction = "in"
|
||||||
|
@ -394,8 +408,8 @@ resource "upcloud_firewall_rules" "k8s" {
|
||||||
content {
|
content {
|
||||||
action = "accept"
|
action = "accept"
|
||||||
comment = "UpCloud DNS"
|
comment = "UpCloud DNS"
|
||||||
destination_port_end = "53"
|
source_port_end = "53"
|
||||||
destination_port_start = "53"
|
source_port_start = "53"
|
||||||
direction = "in"
|
direction = "in"
|
||||||
family = "IPv4"
|
family = "IPv4"
|
||||||
protocol = firewall_rule.value
|
protocol = firewall_rule.value
|
||||||
|
@ -410,8 +424,8 @@ resource "upcloud_firewall_rules" "k8s" {
|
||||||
content {
|
content {
|
||||||
action = "accept"
|
action = "accept"
|
||||||
comment = "UpCloud DNS"
|
comment = "UpCloud DNS"
|
||||||
destination_port_end = "53"
|
source_port_end = "53"
|
||||||
destination_port_start = "53"
|
source_port_start = "53"
|
||||||
direction = "in"
|
direction = "in"
|
||||||
family = "IPv4"
|
family = "IPv4"
|
||||||
protocol = firewall_rule.value
|
protocol = firewall_rule.value
|
||||||
|
@ -426,8 +440,8 @@ resource "upcloud_firewall_rules" "k8s" {
|
||||||
content {
|
content {
|
||||||
action = "accept"
|
action = "accept"
|
||||||
comment = "UpCloud DNS"
|
comment = "UpCloud DNS"
|
||||||
destination_port_end = "53"
|
source_port_end = "53"
|
||||||
destination_port_start = "53"
|
source_port_start = "53"
|
||||||
direction = "in"
|
direction = "in"
|
||||||
family = "IPv6"
|
family = "IPv6"
|
||||||
protocol = firewall_rule.value
|
protocol = firewall_rule.value
|
||||||
|
@ -442,8 +456,8 @@ resource "upcloud_firewall_rules" "k8s" {
|
||||||
content {
|
content {
|
||||||
action = "accept"
|
action = "accept"
|
||||||
comment = "UpCloud DNS"
|
comment = "UpCloud DNS"
|
||||||
destination_port_end = "53"
|
source_port_end = "53"
|
||||||
destination_port_start = "53"
|
source_port_start = "53"
|
||||||
direction = "in"
|
direction = "in"
|
||||||
family = "IPv6"
|
family = "IPv6"
|
||||||
protocol = firewall_rule.value
|
protocol = firewall_rule.value
|
||||||
|
@ -458,8 +472,8 @@ resource "upcloud_firewall_rules" "k8s" {
|
||||||
content {
|
content {
|
||||||
action = "accept"
|
action = "accept"
|
||||||
comment = "NTP Port"
|
comment = "NTP Port"
|
||||||
destination_port_end = "123"
|
source_port_end = "123"
|
||||||
destination_port_start = "123"
|
source_port_start = "123"
|
||||||
direction = "in"
|
direction = "in"
|
||||||
family = "IPv4"
|
family = "IPv4"
|
||||||
protocol = firewall_rule.value
|
protocol = firewall_rule.value
|
||||||
|
@ -468,6 +482,20 @@ resource "upcloud_firewall_rules" "k8s" {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.firewall_default_deny_in ? ["udp"] : []
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "NTP Port"
|
||||||
|
source_port_end = "123"
|
||||||
|
source_port_start = "123"
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv6"
|
||||||
|
protocol = firewall_rule.value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
firewall_rule {
|
firewall_rule {
|
||||||
action = var.firewall_default_deny_in ? "drop" : "accept"
|
action = var.firewall_default_deny_in ? "drop" : "accept"
|
||||||
direction = "in"
|
direction = "in"
|
||||||
|
|
|
@ -35,9 +35,7 @@ This setup assumes that the DHCP is disabled in the vSphere cluster and IP addre
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
* Terraform 0.13.0 or newer
|
* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)
|
||||||
|
|
||||||
*0.12 also works if you modify the provider block to include version and remove all `versions.tf` files*
|
|
||||||
|
|
||||||
## Quickstart
|
## Quickstart
|
||||||
|
|
||||||
|
|
13
deploy-sample.sh
Normal file
13
deploy-sample.sh
Normal file
|
@ -0,0 +1,13 @@
|
||||||
|
#!/bin/bash
|
||||||
|
ansible-playbook -i inventory/${MY_INVENTORY}/inventory.ini --become --user=${MY_SSH_USER} --become-user=root cluster.yml -e etcd_retries=42
|
||||||
|
|
||||||
|
export KUBECONFIG=$(pwd)/inventory/c12s-sample/artifacts/admin.conf
|
||||||
|
echo
|
||||||
|
echo "execute the following in any shell where you want to connect to your cluster with kubectl : "
|
||||||
|
echo "export KUBECONFIG=$(pwd)/inventory/c12s-sample/artifacts/admin.conf"
|
||||||
|
|
||||||
|
kubectl create namespace infra
|
||||||
|
|
||||||
|
echo
|
||||||
|
echo ArgoCD admin password :
|
||||||
|
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d; echo
|
|
@ -37,6 +37,8 @@
|
||||||
* [CentOS/OracleLinux/AlmaLinux/Rocky Linux](docs/centos.md)
|
* [CentOS/OracleLinux/AlmaLinux/Rocky Linux](docs/centos.md)
|
||||||
* [Kylin Linux Advanced Server V10](docs/kylinlinux.md)
|
* [Kylin Linux Advanced Server V10](docs/kylinlinux.md)
|
||||||
* [Amazon Linux 2](docs/amazonlinux.md)
|
* [Amazon Linux 2](docs/amazonlinux.md)
|
||||||
|
* [UOS Linux](docs/uoslinux.md)
|
||||||
|
* [openEuler notes](docs/openeuler.md))
|
||||||
* CRI
|
* CRI
|
||||||
* [Containerd](docs/containerd.md)
|
* [Containerd](docs/containerd.md)
|
||||||
* [Docker](docs/docker.md)
|
* [Docker](docs/docker.md)
|
||||||
|
|
|
@ -3,7 +3,7 @@
|
||||||
## Installing Ansible
|
## Installing Ansible
|
||||||
|
|
||||||
Kubespray supports multiple ansible versions and ships different `requirements.txt` files for them.
|
Kubespray supports multiple ansible versions and ships different `requirements.txt` files for them.
|
||||||
Depending on your available python version you may be limited in chooding which ansible version to use.
|
Depending on your available python version you may be limited in choosing which ansible version to use.
|
||||||
|
|
||||||
It is recommended to deploy the ansible version used by kubespray into a python virtual environment.
|
It is recommended to deploy the ansible version used by kubespray into a python virtual environment.
|
||||||
|
|
||||||
|
@ -281,7 +281,7 @@ For more information about Ansible and bastion hosts, read
|
||||||
|
|
||||||
## Mitogen
|
## Mitogen
|
||||||
|
|
||||||
Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for useage and reasons for deprecation.
|
Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for usage and reasons for deprecation.
|
||||||
|
|
||||||
## Beyond ansible 2.9
|
## Beyond ansible 2.9
|
||||||
|
|
||||||
|
|
|
@ -57,19 +57,28 @@ The name of the network security group your instances are in, can be retrieved v
|
||||||
These will have to be generated first:
|
These will have to be generated first:
|
||||||
|
|
||||||
- Create an Azure AD Application with:
|
- Create an Azure AD Application with:
|
||||||
`az ad app create --display-name kubespray --identifier-uris http://kubespray --homepage http://kubespray.com --password CLIENT_SECRET`
|
|
||||||
|
```ShellSession
|
||||||
|
az ad app create --display-name kubespray --identifier-uris http://kubespray --homepage http://kubespray.com --password CLIENT_SECRET
|
||||||
|
```
|
||||||
|
|
||||||
Display name, identifier-uri, homepage and the password can be chosen
|
Display name, identifier-uri, homepage and the password can be chosen
|
||||||
|
|
||||||
Note the AppId in the output.
|
Note the AppId in the output.
|
||||||
|
|
||||||
- Create Service principal for the application with:
|
- Create Service principal for the application with:
|
||||||
`az ad sp create --id AppId`
|
|
||||||
|
```ShellSession
|
||||||
|
az ad sp create --id AppId
|
||||||
|
```
|
||||||
|
|
||||||
This is the AppId from the last command
|
This is the AppId from the last command
|
||||||
|
|
||||||
- Create the role assignment with:
|
- Create the role assignment with:
|
||||||
`az role assignment create --role "Owner" --assignee http://kubespray --subscription SUBSCRIPTION_ID`
|
|
||||||
|
```ShellSession
|
||||||
|
az role assignment create --role "Owner" --assignee http://kubespray --subscription SUBSCRIPTION_ID
|
||||||
|
```
|
||||||
|
|
||||||
azure\_csi\_aad\_client\_id must be set to the AppId, azure\_csi\_aad\_client\_secret is your chosen secret.
|
azure\_csi\_aad\_client\_id must be set to the AppId, azure\_csi\_aad\_client\_secret is your chosen secret.
|
||||||
|
|
||||||
|
|
|
@ -71,14 +71,27 @@ The name of the resource group that contains the route table. Defaults to `azur
|
||||||
These will have to be generated first:
|
These will have to be generated first:
|
||||||
|
|
||||||
- Create an Azure AD Application with:
|
- Create an Azure AD Application with:
|
||||||
`az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET`
|
|
||||||
|
```ShellSession
|
||||||
|
az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET
|
||||||
|
```
|
||||||
|
|
||||||
display name, identifier-uri, homepage and the password can be chosen
|
display name, identifier-uri, homepage and the password can be chosen
|
||||||
Note the AppId in the output.
|
Note the AppId in the output.
|
||||||
|
|
||||||
- Create Service principal for the application with:
|
- Create Service principal for the application with:
|
||||||
`az ad sp create --id AppId`
|
|
||||||
|
```ShellSession
|
||||||
|
az ad sp create --id AppId
|
||||||
|
```
|
||||||
|
|
||||||
This is the AppId from the last command
|
This is the AppId from the last command
|
||||||
|
|
||||||
- Create the role assignment with:
|
- Create the role assignment with:
|
||||||
`az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID`
|
|
||||||
|
```ShellSession
|
||||||
|
az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID
|
||||||
|
```
|
||||||
|
|
||||||
azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
|
azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
|
||||||
|
|
||||||
|
|
|
@ -48,11 +48,13 @@ The `kubespray-defaults` role is expected to be run before this role.
|
||||||
|
|
||||||
Remember to disable fact gathering since Python might not be present on hosts.
|
Remember to disable fact gathering since Python might not be present on hosts.
|
||||||
|
|
||||||
- hosts: all
|
```yaml
|
||||||
gather_facts: false # not all hosts might be able to run modules yet
|
- hosts: all
|
||||||
roles:
|
gather_facts: false # not all hosts might be able to run modules yet
|
||||||
- kubespray-defaults
|
roles:
|
||||||
- bootstrap-os
|
- kubespray-defaults
|
||||||
|
- bootstrap-os
|
||||||
|
```
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
|
|
|
@ -72,9 +72,14 @@ calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
|
||||||
|
|
||||||
In some cases you may want to route the pods subnet and so NAT is not needed on the nodes.
|
In some cases you may want to route the pods subnet and so NAT is not needed on the nodes.
|
||||||
For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located.
|
For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located.
|
||||||
The following variables need to be set:
|
The following variables need to be set as follows:
|
||||||
`peer_with_router` to enable the peering with the datacenter's border router (default value: false).
|
|
||||||
you'll need to edit the inventory and add a hostvar `local_as` by node.
|
```yml
|
||||||
|
peer_with_router: true # enable the peering with the datacenter's border router (default value: false).
|
||||||
|
nat_outgoing: false # (optional) NAT outgoing (default value: true).
|
||||||
|
```
|
||||||
|
|
||||||
|
And you'll need to edit the inventory and add a hostvar `local_as` by node.
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
|
node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
|
||||||
|
@ -124,8 +129,7 @@ You need to edit your inventory and add:
|
||||||
* `calico_rr` group with nodes in it. `calico_rr` can be combined with
|
* `calico_rr` group with nodes in it. `calico_rr` can be combined with
|
||||||
`kube_node` and/or `kube_control_plane`. `calico_rr` group also must be a child
|
`kube_node` and/or `kube_control_plane`. `calico_rr` group also must be a child
|
||||||
group of `k8s_cluster` group.
|
group of `k8s_cluster` group.
|
||||||
* `cluster_id` by route reflector node/group (see details
|
* `cluster_id` by route reflector node/group (see details [here](https://hub.docker.com/r/calico/routereflector/))
|
||||||
[here](https://hub.docker.com/r/calico/routereflector/))
|
|
||||||
|
|
||||||
Here's an example of Kubespray inventory with standalone route reflectors:
|
Here's an example of Kubespray inventory with standalone route reflectors:
|
||||||
|
|
||||||
|
@ -172,6 +176,8 @@ node5
|
||||||
|
|
||||||
[rack0:vars]
|
[rack0:vars]
|
||||||
cluster_id="1.0.0.1"
|
cluster_id="1.0.0.1"
|
||||||
|
calico_rr_id=rr1
|
||||||
|
calico_group_id=rr1
|
||||||
```
|
```
|
||||||
|
|
||||||
The inventory above will deploy the following topology assuming that calico's
|
The inventory above will deploy the following topology assuming that calico's
|
||||||
|
@ -199,6 +205,14 @@ To re-define health host please set the following variable in your inventory:
|
||||||
calico_healthhost: "0.0.0.0"
|
calico_healthhost: "0.0.0.0"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Optional : Configure VXLAN hardware Offload
|
||||||
|
|
||||||
|
Because of the issue [projectcalico/calico#4727](https://github.com/projectcalico/calico/issues/4727), the VXLAN offload is disabled by default. It can be configured like this:
|
||||||
|
|
||||||
|
```yml
|
||||||
|
calico_feature_detect_override: "ChecksumOffloadBroken=true" # The VXLAN offload will be enabled when the kernel version is > 5.7 (it may cause problems with buggy NIC drivers)
|
||||||
|
```
|
||||||
|
|
||||||
### Optional : Configure Calico Node probe timeouts
|
### Optional : Configure Calico Node probe timeouts
|
||||||
|
|
||||||
Under certain conditions a deployer may need to tune the Calico liveness and readiness probes timeout settings. These can be configured like this:
|
Under certain conditions a deployer may need to tune the Calico liveness and readiness probes timeout settings. These can be configured like this:
|
||||||
|
@ -212,7 +226,7 @@ calico_node_readinessprobe_timeout: 10
|
||||||
|
|
||||||
Calico supports two types of encapsulation: [VXLAN and IP in IP](https://docs.projectcalico.org/v3.11/networking/vxlan-ipip). VXLAN is the more mature implementation and enabled by default, please check your environment if you need *IP in IP* encapsulation.
|
Calico supports two types of encapsulation: [VXLAN and IP in IP](https://docs.projectcalico.org/v3.11/networking/vxlan-ipip). VXLAN is the more mature implementation and enabled by default, please check your environment if you need *IP in IP* encapsulation.
|
||||||
|
|
||||||
*IP in IP* and *VXLAN* is mutualy exclusive modes.
|
*IP in IP* and *VXLAN* are mutually exclusive modes.
|
||||||
|
|
||||||
Kubespray defaults have changed after version 2.18 from auto-enabling `ipip` mode to auto-enabling `vxlan`. This was done to facilitate wider deployment scenarios including those where vxlan acceleration is provided by the underlying network devices.
|
Kubespray defaults have changed after version 2.18 from auto-enabling `ipip` mode to auto-enabling `vxlan`. This was done to facilitate wider deployment scenarios including those where vxlan acceleration is provided by the underlying network devices.
|
||||||
|
|
||||||
|
@ -245,14 +259,14 @@ calico_network_backend: 'bird'
|
||||||
|
|
||||||
If you would like to migrate from the old IP in IP with `bird` network backends default to the new VXLAN based encapsulation you need to perform this change before running an upgrade of your cluster; the `cluster.yml` and `upgrade-cluster.yml` playbooks will refuse to continue if they detect incompatible settings.
|
If you would like to migrate from the old IP in IP with `bird` network backends default to the new VXLAN based encapsulation you need to perform this change before running an upgrade of your cluster; the `cluster.yml` and `upgrade-cluster.yml` playbooks will refuse to continue if they detect incompatible settings.
|
||||||
|
|
||||||
Execute the following sters on one of the control plane nodes, ensure the cluster in healthy before proceeding.
|
Execute the following steps on one of the control plane nodes, ensuring the cluster is healthy before proceeding.
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
calicoctl.sh patch felixconfig default -p '{"spec":{"vxlanEnabled":true}}'
|
calicoctl.sh patch felixconfig default -p '{"spec":{"vxlanEnabled":true}}'
|
||||||
calicoctl.sh patch ippool default-pool -p '{"spec":{"ipipMode":"Never", "vxlanMode":"Always"}}'
|
calicoctl.sh patch ippool default-pool -p '{"spec":{"ipipMode":"Never", "vxlanMode":"Always"}}'
|
||||||
```
|
```
|
||||||
|
|
||||||
**Note:** if you created multiple ippools you will need to patch all of them individually to change their encapsulation. The kubespray playbooks only handle the default ippool creaded by kubespray.
|
**Note:** if you created multiple ippools you will need to patch all of them individually to change their encapsulation. The kubespray playbooks only handle the default ippool created by kubespray.
|
||||||
|
|
||||||
Wait for the `vxlan.calico` interfaces to be created on all cluster nodes and traffic to be routed through it then you can disable `ipip`.
|
Wait for the `vxlan.calico` interfaces to be created on all cluster nodes and traffic to be routed through it then you can disable `ipip`.
|
||||||
|
|
||||||
|
@ -369,7 +383,7 @@ use_localhost_as_kubeapi_loadbalancer: true
|
||||||
|
|
||||||
### Tunneled versus Direct Server Return
|
### Tunneled versus Direct Server Return
|
||||||
|
|
||||||
By default Calico usese Tunneled service mode but it can use direct server return (DSR) in order to optimize the return path for a service.
|
By default Calico uses Tunneled service mode but it can use direct server return (DSR) in order to optimize the return path for a service.
|
||||||
|
|
||||||
To configure DSR:
|
To configure DSR:
|
||||||
|
|
||||||
|
@ -395,7 +409,7 @@ Please see [Calico eBPF troubleshooting guide](https://docs.projectcalico.org/ma
|
||||||
|
|
||||||
## Wireguard Encryption
|
## Wireguard Encryption
|
||||||
|
|
||||||
Calico supports using Wireguard for encryption. Please see the docs on [encryptiong cluster pod traffic](https://docs.projectcalico.org/security/encrypt-cluster-pod-traffic).
|
Calico supports using Wireguard for encryption. Please see the docs on [encrypt cluster pod traffic](https://docs.projectcalico.org/security/encrypt-cluster-pod-traffic).
|
||||||
|
|
||||||
To enable wireguard support:
|
To enable wireguard support:
|
||||||
|
|
||||||
|
|
|
@ -2,12 +2,12 @@
|
||||||
|
|
||||||
## CentOS 7
|
## CentOS 7
|
||||||
|
|
||||||
The maximum python version offically supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above.
|
The maximum python version officially supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above.
|
||||||
Kubespray supports multiple ansible versions but only the default (5.x) gets wide testing coverage. If your deployment host is CentOS 7 it is recommended to use one of the earlier versions still supported.
|
Kubespray supports multiple ansible versions but only the default (5.x) gets wide testing coverage. If your deployment host is CentOS 7 it is recommended to use one of the earlier versions still supported.
|
||||||
|
|
||||||
## CentOS 8
|
## CentOS 8
|
||||||
|
|
||||||
CentOS 8 / Oracle Linux 8 / AlmaLinux 8 / Rocky Linux 8 ship only with iptables-nft (ie without iptables-legacy similar to RHEL8)
|
CentOS 8 / Oracle Linux 8,9 / AlmaLinux 8,9 / Rocky Linux 8,9 ship only with iptables-nft (ie without iptables-legacy similar to RHEL8)
|
||||||
The only tested configuration for now is using Calico CNI
|
The only tested configuration for now is using Calico CNI
|
||||||
You need to add `calico_iptables_backend: "NFT"` to your configuration.
|
You need to add `calico_iptables_backend: "NFT"` to your configuration.
|
||||||
|
|
||||||
|
|
|
@ -16,6 +16,7 @@ fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x
|
||||||
fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
|
fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
|
||||||
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
rockylinux9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
|
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
|
||||||
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
|
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
|
||||||
ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
|
ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
|
||||||
|
@ -35,6 +36,7 @@ fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
@ -54,6 +56,7 @@ fedora35 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
||||||
opensuse | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
opensuse | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
||||||
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
||||||
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
|
|
@ -56,7 +56,7 @@ cilium_operator_extra_volume_mounts:
|
||||||
## Choose Cilium version
|
## Choose Cilium version
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
cilium_version: v1.11.3
|
cilium_version: v1.12.1
|
||||||
```
|
```
|
||||||
|
|
||||||
## Add variable to config
|
## Add variable to config
|
||||||
|
@ -121,6 +121,23 @@ cilium_encryption_type: "wireguard"
|
||||||
|
|
||||||
Kubespray currently supports Linux distributions with Wireguard Kernel mode on Linux 5.6 and newer.
|
Kubespray currently supports Linux distributions with Wireguard Kernel mode on Linux 5.6 and newer.
|
||||||
|
|
||||||
|
## Bandwidth Manager
|
||||||
|
|
||||||
|
Cilium’s bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.
|
||||||
|
|
||||||
|
Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies.
|
||||||
|
If such policies select the Pod at egress, bandwidth enforcement will be disabled for those Pods.
|
||||||
|
|
||||||
|
Bandwidth Manager requires a v5.1.x or more recent Linux kernel.
|
||||||
|
|
||||||
|
For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/v1.12/gettingstarted/bandwidth-manager/)
|
||||||
|
|
||||||
|
To use this function, set the following parameters
|
||||||
|
|
||||||
|
```yml
|
||||||
|
cilium_enable_bandwidth_manager: true
|
||||||
|
```
|
||||||
|
|
||||||
## Install Cilium Hubble
|
## Install Cilium Hubble
|
||||||
|
|
||||||
k8s-net-cilium.yml:
|
k8s-net-cilium.yml:
|
||||||
|
@ -153,3 +170,32 @@ cilium_hubble_metrics:
|
||||||
```
|
```
|
||||||
|
|
||||||
[More](https://docs.cilium.io/en/v1.9/operations/metrics/#hubble-exported-metrics)
|
[More](https://docs.cilium.io/en/v1.9/operations/metrics/#hubble-exported-metrics)
|
||||||
|
|
||||||
|
## Upgrade considerations
|
||||||
|
|
||||||
|
### Rolling-restart timeouts
|
||||||
|
|
||||||
|
Cilium relies on the kernel's BPF support, which is extremely fast at runtime but incurs a compilation penalty on initialization and update.
|
||||||
|
|
||||||
|
As a result, the Cilium DaemonSet pods can take a significant time to start, which scales with the number of nodes and endpoints in your cluster.
|
||||||
|
|
||||||
|
As part of cluster.yml, this DaemonSet is restarted, and Kubespray's [default timeouts for this operation](../roles/network_plugin/cilium/defaults/main.yml)
|
||||||
|
are not appropriate for large clusters.
|
||||||
|
|
||||||
|
This means that you will likely want to update these timeouts to a value more in-line with your cluster's number of nodes and their respective CPU performance.
|
||||||
|
This is configured by the following values:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# Configure how long to wait for the Cilium DaemonSet to be ready again
|
||||||
|
cilium_rolling_restart_wait_retries_count: 30
|
||||||
|
cilium_rolling_restart_wait_retries_delay_seconds: 10
|
||||||
|
```
|
||||||
|
|
||||||
|
The total time allowed (count * delay) should be at least `($number_of_nodes_in_cluster * $cilium_pod_start_time)` for successful rolling updates. There are no
|
||||||
|
drawbacks to making it higher and giving yourself a time buffer to accommodate transient slowdowns.
|
||||||
|
|
||||||
|
Note: To find the `$cilium_pod_start_time` for your cluster, you can simply restart a Cilium pod on a node of your choice and look at how long it takes for it
|
||||||
|
to become ready.
|
||||||
|
|
||||||
|
Note 2: The default CPU requests/limits for Cilium pods is set to a very conservative 100m:500m which will likely yield very slow startup for Cilium pods. You
|
||||||
|
probably want to significantly increase the CPU limit specifically if short bursts of CPU from Cilium are acceptable to you.
|
||||||
|
|
|
@ -39,4 +39,68 @@ containerd_registries:
|
||||||
image_command_tool: crictl
|
image_command_tool: crictl
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Containerd Runtimes
|
||||||
|
|
||||||
|
Containerd supports multiple runtime configurations that can be used with
|
||||||
|
[RuntimeClass] Kubernetes feature. See [runtime classes in containerd] for the
|
||||||
|
details of containerd configuration.
|
||||||
|
|
||||||
|
In kubespray, the default runtime name is "runc", and it can be configured with the `containerd_runc_runtime` dictionary:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
containerd_runc_runtime:
|
||||||
|
name: runc
|
||||||
|
type: "io.containerd.runc.v2"
|
||||||
|
engine: ""
|
||||||
|
root: ""
|
||||||
|
options:
|
||||||
|
systemdCgroup: "false"
|
||||||
|
binaryName: /usr/local/bin/my-runc
|
||||||
|
base_runtime_spec: cri-base.json
|
||||||
|
```
|
||||||
|
|
||||||
|
Further runtimes can be configured with `containerd_additional_runtimes`, which
|
||||||
|
is a list of such dictionaries.
|
||||||
|
|
||||||
|
Default runtime can be changed by setting `containerd_default_runtime`.
|
||||||
|
|
||||||
|
#### Base runtime specs and limiting number of open files
|
||||||
|
|
||||||
|
`base_runtime_spec` key in a runtime dictionary is used to explicitly
|
||||||
|
specify a runtime spec json file. `runc` runtime has it set to `cri-base.json`,
|
||||||
|
which is generated with `ctr oci spec > /etc/containerd/cri-base.json` and
|
||||||
|
updated to include a custom setting for maximum number of file descriptors per
|
||||||
|
container.
|
||||||
|
|
||||||
|
You can change maximum number of file descriptors per container for the default
|
||||||
|
`runc` runtime by setting the `containerd_base_runtime_spec_rlimit_nofile`
|
||||||
|
variable.
|
||||||
|
|
||||||
|
You can tune many more [settings][runtime-spec] by supplying your own file name and content with `containerd_base_runtime_specs`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
containerd_base_runtime_specs:
|
||||||
|
cri-spec-custom.json: |
|
||||||
|
{
|
||||||
|
"ociVersion": "1.0.2-dev",
|
||||||
|
"process": {
|
||||||
|
"user": {
|
||||||
|
"uid": 0,
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
The files in this dict will be placed in containerd config directory,
|
||||||
|
`/etc/containerd` by default. The files can then be referenced by filename in a
|
||||||
|
runtime:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
containerd_runc_runtime:
|
||||||
|
name: runc
|
||||||
|
base_runtime_spec: cri-spec-custom.json
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
[containerd]: https://containerd.io/
|
[containerd]: https://containerd.io/
|
||||||
|
[RuntimeClass]: https://kubernetes.io/docs/concepts/containers/runtime-class/
|
||||||
|
[runtime classes in containerd]: https://github.com/containerd/containerd/blob/main/docs/cri/config.md#runtime-classes
|
||||||
|
[runtime-spec]: https://github.com/opencontainers/runtime-spec
|
||||||
|
|
|
@ -3,34 +3,39 @@
|
||||||
Debian Jessie installation Notes:
|
Debian Jessie installation Notes:
|
||||||
|
|
||||||
- Add
|
- Add
|
||||||
|
|
||||||
```GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"```
|
```ini
|
||||||
|
GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
|
||||||
to /etc/default/grub. Then update with
|
|
||||||
|
|
||||||
```ShellSession
|
|
||||||
sudo update-grub
|
|
||||||
sudo update-grub2
|
|
||||||
sudo reboot
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
to `/etc/default/grub`. Then update with
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
sudo update-grub
|
||||||
|
sudo update-grub2
|
||||||
|
sudo reboot
|
||||||
|
```
|
||||||
|
|
||||||
- Add the [backports](https://backports.debian.org/Instructions/) which contain Systemd 2.30 and update Systemd.
|
- Add the [backports](https://backports.debian.org/Instructions/) which contain Systemd 2.30 and update Systemd.
|
||||||
|
|
||||||
```apt-get -t jessie-backports install systemd```
|
```ShellSession
|
||||||
|
apt-get -t jessie-backports install systemd
|
||||||
|
```
|
||||||
|
|
||||||
(Necessary because the default Systemd version (2.15) does not support the "Delegate" directive in service files)
|
(Necessary because the default Systemd version (2.15) does not support the "Delegate" directive in service files)
|
||||||
|
|
||||||
- Add the Ansible repository and install Ansible to get a proper version
|
- Add the Ansible repository and install Ansible to get a proper version
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
sudo add-apt-repository ppa:ansible/ansible
|
sudo add-apt-repository ppa:ansible/ansible
|
||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get install ansible
|
sudo apt-get install ansible
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
- Install Jinja2 and Python-Netaddr
|
- Install Jinja2 and Python-Netaddr
|
||||||
|
|
||||||
```sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr```
|
```ShellSession
|
||||||
|
sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr
|
||||||
|
```
|
||||||
|
|
||||||
Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
|
Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
|
||||||
|
|
|
@ -19,6 +19,14 @@ ndots value to be used in ``/etc/resolv.conf``
|
||||||
It is important to note that multiple search domains combined with high ``ndots``
|
It is important to note that multiple search domains combined with high ``ndots``
|
||||||
values lead to poor performance of DNS stack, so please choose it wisely.
|
values lead to poor performance of DNS stack, so please choose it wisely.
|
||||||
|
|
||||||
|
## dns_timeout
|
||||||
|
|
||||||
|
timeout value to be used in ``/etc/resolv.conf``
|
||||||
|
|
||||||
|
## dns_attempts
|
||||||
|
|
||||||
|
attempts value to be used in ``/etc/resolv.conf``
|
||||||
|
|
||||||
### searchdomains
|
### searchdomains
|
||||||
|
|
||||||
Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
|
Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
|
||||||
|
@ -26,6 +34,8 @@ Custom search domains to be added in addition to the cluster search domains (``d
|
||||||
Most Linux systems limit the total number of search domains to 6 and the total length of all search domains
|
Most Linux systems limit the total number of search domains to 6 and the total length of all search domains
|
||||||
to 256 characters. Depending on the length of ``dns_domain``, you're limited to less than the total limit.
|
to 256 characters. Depending on the length of ``dns_domain``, you're limited to less than the total limit.
|
||||||
|
|
||||||
|
`remove_default_searchdomains: true` will remove the default cluster search domains.
|
||||||
|
|
||||||
Please note that ``resolvconf_mode: docker_dns`` will automatically add your systems search domains as
|
Please note that ``resolvconf_mode: docker_dns`` will automatically add your systems search domains as
|
||||||
additional search domains. Please take this into the accounts for the limits.
|
additional search domains. Please take this into the accounts for the limits.
|
||||||
|
|
||||||
|
@ -40,6 +50,12 @@ is not set, a default resolver is chosen (depending on cloud provider or 8.8.8.8
|
||||||
DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup
|
DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup
|
||||||
DNS servers in early cluster deployment when no cluster DNS is available yet.
|
DNS servers in early cluster deployment when no cluster DNS is available yet.
|
||||||
|
|
||||||
|
### dns_upstream_forward_extra_opts
|
||||||
|
|
||||||
|
Whether or not upstream DNS servers come from `upstream_dns_servers` variable or /etc/resolv.conf, related forward block in coredns (and nodelocaldns) configuration can take options (see <https://coredns.io/plugins/forward/> for details).
|
||||||
|
These are configurable in the inventory as a dictionary in the `dns_upstream_forward_extra_opts` variable.
|
||||||
|
By default, no options are added other than the hardcoded ones (see `roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2` and `roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2`).
|
||||||
|
|
||||||
### coredns_external_zones
|
### coredns_external_zones
|
||||||
|
|
||||||
Array of optional external zones to coredns forward queries to. It's injected into
|
Array of optional external zones to coredns forward queries to. It's injected into
|
||||||
|
@ -62,6 +78,13 @@ coredns_external_zones:
|
||||||
nameservers:
|
nameservers:
|
||||||
- 192.168.0.53
|
- 192.168.0.53
|
||||||
cache: 0
|
cache: 0
|
||||||
|
- zones:
|
||||||
|
- mydomain.tld
|
||||||
|
nameservers:
|
||||||
|
- 10.233.0.3
|
||||||
|
cache: 5
|
||||||
|
rewrite:
|
||||||
|
- name stop website.tld website.namespace.svc.cluster.local
|
||||||
```
|
```
|
||||||
|
|
||||||
or as INI
|
or as INI
|
||||||
|
@ -207,7 +230,7 @@ cluster service names.
|
||||||
|
|
||||||
Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames(cluster.local suffix by default).
|
Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames(cluster.local suffix by default).
|
||||||
|
|
||||||
More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/0030-nodelocal-dns-cache.md).
|
More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/1024-nodelocal-cache-dns/README.md).
|
||||||
|
|
||||||
**As per the 2.10 release, Nodelocal DNS cache is enabled by default.**
|
**As per the 2.10 release, Nodelocal DNS cache is enabled by default.**
|
||||||
|
|
||||||
|
@ -263,7 +286,8 @@ nodelocaldns_secondary_skew_seconds: 5
|
||||||
|
|
||||||
* the ``searchdomains`` have a limitation of a 6 names and 256 chars
|
* the ``searchdomains`` have a limitation of 6 names and 256 chars
|
||||||
length. Due to default ``svc, default.svc`` subdomains, the actual
|
length. Due to default ``svc, default.svc`` subdomains, the actual
|
||||||
limits are a 4 names and 239 chars respectively.
|
limits are 4 names and 239 chars respectively. If `remove_default_searchdomains: true`
|
||||||
|
added you are back to 6 names.
|
||||||
|
|
||||||
* the ``nameservers`` have a limitation of a 3 servers, although there
|
* the ``nameservers`` have a limitation of 3 servers, although there
|
||||||
is a way to mitigate that with the ``upstream_dns_servers``,
|
is a way to mitigate that with the ``upstream_dns_servers``,
|
||||||
|
|
|
@ -54,7 +54,7 @@ Prepare ignition and serve via http (a.e. python -m http.server )
|
||||||
|
|
||||||
### create guest
|
### create guest
|
||||||
|
|
||||||
```shell script
|
```ShellSession
|
||||||
machine_name=myfcos1
|
machine_name=myfcos1
|
||||||
ignition_url=http://mywebserver/fcos.ign
|
ignition_url=http://mywebserver/fcos.ign
|
||||||
|
|
||||||
|
|
|
@ -2,6 +2,8 @@
|
||||||
|
|
||||||
Flannel is a network fabric for containers, designed for Kubernetes
|
Flannel is a network fabric for containers, designed for Kubernetes
|
||||||
|
|
||||||
|
Supported [backends](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#wireguard): `vxlan`, `host-gw` and `wireguard`
|
||||||
|
|
||||||
**Warning:** You may encounter this [bug](https://github.com/coreos/flannel/pull/1282) with `VXLAN` backend, while waiting on a newer Flannel version the current workaround (`ethtool --offload flannel.1 rx off tx off`) is showcase in kubespray [networking test](tests/testcases/040_check-network-adv.yml:31).
|
**Warning:** You may encounter this [bug](https://github.com/coreos/flannel/pull/1282) with `VXLAN` backend, while waiting on a newer Flannel version the current workaround (`ethtool --offload flannel.1 rx off tx off`) is showcased in kubespray [networking test](tests/testcases/040_check-network-adv.yml:31).
|
||||||
|
|
||||||
## Verifying flannel install
|
## Verifying flannel install
|
||||||
|
|
|
@ -2,15 +2,19 @@
|
||||||
|
|
||||||
Google Cloud Platform can be used for creation of Kubernetes Service Load Balancer.
|
Google Cloud Platform can be used for creation of Kubernetes Service Load Balancer.
|
||||||
|
|
||||||
This feature is able to deliver by adding parameters to kube-controller-manager and kubelet. You need specify:
|
This feature can be delivered by adding parameters to `kube-controller-manager` and `kubelet`. You need to specify:
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
--cloud-provider=gce
|
--cloud-provider=gce
|
||||||
--cloud-config=/etc/kubernetes/cloud-config
|
--cloud-config=/etc/kubernetes/cloud-config
|
||||||
|
```
|
||||||
|
|
||||||
To get working it in kubespray, you need to add tag to GCE instances and specify it in kubespray group vars and also set cloud_provider to gce. So for example, in file group_vars/all/gcp.yml:
|
To get it working in kubespray, you need to add a tag to GCE instances and specify it in kubespray group vars and also set `cloud_provider` to `gce`. So for example, in file `group_vars/all/gcp.yml`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
cloud_provider: gce
|
cloud_provider: gce
|
||||||
gce_node_tags: k8s-lb
|
gce_node_tags: k8s-lb
|
||||||
|
```
|
||||||
|
|
||||||
When you will setup it and create SVC in Kubernetes with type=LoadBalancer, cloud provider will create public IP and will set firewall.
|
When you set it up and create an SVC in Kubernetes with `type=LoadBalancer`, the cloud provider will create a public IP and will set the firewall.
|
||||||
Note: Cloud provider run under VM service account, so this account needs to have correct permissions to be able to create all GCP resources.
|
Note: Cloud provider run under VM service account, so this account needs to have correct permissions to be able to create all GCP resources.
|
||||||
|
|
|
@ -17,9 +17,9 @@ The **kubernetes** version should be at least `v1.23.6` to have all the most rec
|
||||||
---
|
---
|
||||||
|
|
||||||
## kube-apiserver
|
## kube-apiserver
|
||||||
authorization_modes: ['Node','RBAC']
|
authorization_modes: ['Node', 'RBAC']
|
||||||
# AppArmor-based OS
|
# AppArmor-based OS
|
||||||
#kube_apiserver_feature_gates: ['AppArmor=true']
|
# kube_apiserver_feature_gates: ['AppArmor=true']
|
||||||
kube_apiserver_request_timeout: 120s
|
kube_apiserver_request_timeout: 120s
|
||||||
kube_apiserver_service_account_lookup: true
|
kube_apiserver_service_account_lookup: true
|
||||||
|
|
||||||
|
@ -41,7 +41,18 @@ kube_encrypt_secret_data: true
|
||||||
kube_encryption_resources: [secrets]
|
kube_encryption_resources: [secrets]
|
||||||
kube_encryption_algorithm: "secretbox"
|
kube_encryption_algorithm: "secretbox"
|
||||||
|
|
||||||
kube_apiserver_enable_admission_plugins: ['EventRateLimit,AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity']
|
kube_apiserver_enable_admission_plugins:
|
||||||
|
- EventRateLimit
|
||||||
|
- AlwaysPullImages
|
||||||
|
- ServiceAccount
|
||||||
|
- NamespaceLifecycle
|
||||||
|
- NodeRestriction
|
||||||
|
- LimitRanger
|
||||||
|
- ResourceQuota
|
||||||
|
- MutatingAdmissionWebhook
|
||||||
|
- ValidatingAdmissionWebhook
|
||||||
|
- PodNodeSelector
|
||||||
|
- PodSecurity
|
||||||
kube_apiserver_admission_control_config_file: true
|
kube_apiserver_admission_control_config_file: true
|
||||||
# EventRateLimit plugin configuration
|
# EventRateLimit plugin configuration
|
||||||
kube_apiserver_admission_event_rate_limits:
|
kube_apiserver_admission_event_rate_limits:
|
||||||
|
@ -60,7 +71,7 @@ kube_profiling: false
|
||||||
kube_controller_manager_bind_address: 127.0.0.1
|
kube_controller_manager_bind_address: 127.0.0.1
|
||||||
kube_controller_terminated_pod_gc_threshold: 50
|
kube_controller_terminated_pod_gc_threshold: 50
|
||||||
# AppArmor-based OS
|
# AppArmor-based OS
|
||||||
#kube_controller_feature_gates: ["RotateKubeletServerCertificate=true","AppArmor=true"]
|
# kube_controller_feature_gates: ["RotateKubeletServerCertificate=true", "AppArmor=true"]
|
||||||
kube_controller_feature_gates: ["RotateKubeletServerCertificate=true"]
|
kube_controller_feature_gates: ["RotateKubeletServerCertificate=true"]
|
||||||
|
|
||||||
## kube-scheduler
|
## kube-scheduler
|
||||||
|
@ -68,13 +79,12 @@ kube_scheduler_bind_address: 127.0.0.1
|
||||||
kube_kubeadm_scheduler_extra_args:
|
kube_kubeadm_scheduler_extra_args:
|
||||||
profiling: false
|
profiling: false
|
||||||
# AppArmor-based OS
|
# AppArmor-based OS
|
||||||
#kube_scheduler_feature_gates: ["AppArmor=true"]
|
# kube_scheduler_feature_gates: ["AppArmor=true"]
|
||||||
|
|
||||||
## etcd
|
## etcd
|
||||||
etcd_deployment_type: kubeadm
|
etcd_deployment_type: kubeadm
|
||||||
|
|
||||||
## kubelet
|
## kubelet
|
||||||
kubelet_authorization_mode_webhook: true
|
|
||||||
kubelet_authentication_token_webhook: true
|
kubelet_authentication_token_webhook: true
|
||||||
kube_read_only_port: 0
|
kube_read_only_port: 0
|
||||||
kubelet_rotate_server_certificates: true
|
kubelet_rotate_server_certificates: true
|
||||||
|
@ -83,12 +93,24 @@ kubelet_event_record_qps: 1
|
||||||
kubelet_rotate_certificates: true
|
kubelet_rotate_certificates: true
|
||||||
kubelet_streaming_connection_idle_timeout: "5m"
|
kubelet_streaming_connection_idle_timeout: "5m"
|
||||||
kubelet_make_iptables_util_chains: true
|
kubelet_make_iptables_util_chains: true
|
||||||
kubelet_feature_gates: ["RotateKubeletServerCertificate=true","SeccompDefault=true"]
|
kubelet_feature_gates: ["RotateKubeletServerCertificate=true", "SeccompDefault=true"]
|
||||||
kubelet_seccomp_default: true
|
kubelet_seccomp_default: true
|
||||||
|
kubelet_systemd_hardening: true
|
||||||
|
# In case you have multiple interfaces in your
|
||||||
|
# control plane nodes and you want to specify the right
|
||||||
|
# IP addresses, kubelet_secure_addresses allows you
|
||||||
|
# to specify the IP from which the kubelet
|
||||||
|
# will receive the packets.
|
||||||
|
kubelet_secure_addresses: "192.168.10.110 192.168.10.111 192.168.10.112"
|
||||||
|
|
||||||
# additional configurations
|
# additional configurations
|
||||||
kube_owner: root
|
kube_owner: root
|
||||||
kube_cert_group: root
|
kube_cert_group: root
|
||||||
|
|
||||||
|
# create a default Pod Security Configuration and deny running of insecure pods
|
||||||
|
# kube_system namespace is exempted by default
|
||||||
|
kube_pod_security_use_default: true
|
||||||
|
kube_pod_security_default_enforce: restricted
|
||||||
```
|
```
|
||||||
|
|
||||||
Let's take a deep look to the resultant **kubernetes** configuration:
|
Let's take a deep look to the resultant **kubernetes** configuration:
|
||||||
|
@ -98,6 +120,8 @@ Let's take a deep look to the resultant **kubernetes** configuration:
|
||||||
* The `encryption-provider-config` provide encryption at rest. This means that the `kube-apiserver` encrypt data that is going to be stored before they reach `etcd`. So the data is completely unreadable from `etcd` (in case an attacker is able to exploit this).
|
* The `encryption-provider-config` provides encryption at rest. This means that the `kube-apiserver` encrypts data that is going to be stored before it reaches `etcd`. So the data is completely unreadable from `etcd` (in case an attacker is able to exploit this).
|
||||||
* The `rotateCertificates` in `KubeletConfiguration` is set to `true` along with `serverTLSBootstrap`. This could be used in alternative to `tlsCertFile` and `tlsPrivateKeyFile` parameters. Additionally it automatically generates certificates by itself, but you need to manually approve them or at least using an operator to do this (for more details, please take a look here: <https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/>).
|
* The `rotateCertificates` in `KubeletConfiguration` is set to `true` along with `serverTLSBootstrap`. This could be used in alternative to `tlsCertFile` and `tlsPrivateKeyFile` parameters. Additionally it automatically generates certificates by itself, but you need to manually approve them or at least using an operator to do this (for more details, please take a look here: <https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/>).
|
||||||
* If you are installing **kubernetes** in an AppArmor-based OS (eg. Debian/Ubuntu) you can enable the `AppArmor` feature gate uncommenting the lines with the comment `# AppArmor-based OS` on top.
|
* If you are installing **kubernetes** in an AppArmor-based OS (eg. Debian/Ubuntu) you can enable the `AppArmor` feature gate uncommenting the lines with the comment `# AppArmor-based OS` on top.
|
||||||
|
* The `kubelet_systemd_hardening`, both with `kubelet_secure_addresses` setup a minimal firewall on the system. To better understand how these variables work, here's an explanatory image:
|
||||||
|
![kubelet hardening](img/kubelet-hardening.png)
|
||||||
|
|
||||||
Once you have the file properly filled, you can run the **Ansible** command to start the installation:
|
Once you have the file properly filled, you can run the **Ansible** command to start the installation:
|
||||||
|
|
||||||
|
|
BIN
docs/img/kubelet-hardening.png
Normal file
BIN
docs/img/kubelet-hardening.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 1.5 MiB |
|
@ -6,84 +6,100 @@
|
||||||
* List of all forked repos could be retrieved from github page of original project.
|
* List of all forked repos could be retrieved from github page of original project.
|
||||||
|
|
||||||
2. Add **forked repo** as submodule to desired folder in your existent ansible repo (for example 3d/kubespray):
|
2. Add **forked repo** as submodule to desired folder in your existent ansible repo (for example 3d/kubespray):
|
||||||
```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```
|
|
||||||
Git will create `.gitmodules` file in your existent ansible repo:
|
```ShellSession
|
||||||
|
git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray
|
||||||
|
```
|
||||||
|
|
||||||
|
Git will create `.gitmodules` file in your existent ansible repo:
|
||||||
|
|
||||||
```ini
|
```ini
|
||||||
[submodule "3d/kubespray"]
|
[submodule "3d/kubespray"]
|
||||||
path = 3d/kubespray
|
path = 3d/kubespray
|
||||||
url = https://github.com/YOUR_GITHUB/kubespray.git
|
url = https://github.com/YOUR_GITHUB/kubespray.git
|
||||||
```
|
```
|
||||||
|
|
||||||
3. Configure git to show submodule status:
|
3. Configure git to show submodule status:
|
||||||
```git config --global status.submoduleSummary true```
|
|
||||||
|
```ShellSession
|
||||||
|
git config --global status.submoduleSummary true
|
||||||
|
```
|
||||||
|
|
||||||
4. Add *original* kubespray repo as upstream:
|
4. Add *original* kubespray repo as upstream:
|
||||||
```cd kubespray && git remote add upstream https://github.com/kubernetes-sigs/kubespray.git```
|
|
||||||
|
```ShellSession
|
||||||
|
cd kubespray && git remote add upstream https://github.com/kubernetes-sigs/kubespray.git
|
||||||
|
```
|
||||||
|
|
||||||
5. Sync your master branch with upstream:
|
5. Sync your master branch with upstream:
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
git checkout master
|
git checkout master
|
||||||
git fetch upstream
|
git fetch upstream
|
||||||
git merge upstream/master
|
git merge upstream/master
|
||||||
git push origin master
|
git push origin master
|
||||||
```
|
```
|
||||||
|
|
||||||
6. Create a new branch which you will use in your working environment:
|
6. Create a new branch which you will use in your working environment:
|
||||||
```git checkout -b work```
|
|
||||||
|
```ShellSession
|
||||||
|
git checkout -b work
|
||||||
|
```
|
||||||
|
|
||||||
***Never*** use master branch of your repository for your commits.
|
***Never*** use master branch of your repository for your commits.
|
||||||
|
|
||||||
7. Modify path to library and roles in your ansible.cfg file (role naming should be unique, you may have to rename your existent roles if they have same names as kubespray project),
|
7. Modify path to library and roles in your ansible.cfg file (role naming should be unique, you may have to rename your existent roles if they have same names as kubespray project),
|
||||||
if you had roles in your existing ansible project before, you can add the path to those separated with `:`:
|
if you had roles in your existing ansible project before, you can add the path to those separated with `:`:
|
||||||
|
|
||||||
8. ```ini
|
```ini
|
||||||
...
|
...
|
||||||
library = ./library/:3d/kubespray/library/
|
library = ./library/:3d/kubespray/library/
|
||||||
roles_path = ./roles/:3d/kubespray/roles/
|
roles_path = ./roles/:3d/kubespray/roles/
|
||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
9. Copy and modify configs from kubespray `group_vars` folder to corresponding `group_vars` folder in your existent project.
|
8. Copy and modify configs from kubespray `group_vars` folder to corresponding `group_vars` folder in your existent project.
|
||||||
You could rename *all.yml* config to something else, i.e. *kubespray.yml* and create corresponding group in your inventory file, which will include all hosts groups related to kubernetes setup.
|
|
||||||
|
|
||||||
10. Modify your ansible inventory file by adding mapping of your existent groups (if any) to kubespray naming.
|
You could rename *all.yml* config to something else, i.e. *kubespray.yml* and create corresponding group in your inventory file, which will include all hosts groups related to kubernetes setup.
|
||||||
|
|
||||||
|
9. Modify your ansible inventory file by adding mapping of your existent groups (if any) to kubespray naming.
|
||||||
For example:
|
For example:
|
||||||
|
|
||||||
```ini
|
```ini
|
||||||
...
|
...
|
||||||
#Kargo groups:
|
#Kubespray groups:
|
||||||
[kube_node:children]
|
[kube_node:children]
|
||||||
kubenode
|
kubenode
|
||||||
|
|
||||||
[k8s_cluster:children]
|
[k8s_cluster:children]
|
||||||
kubernetes
|
kubernetes
|
||||||
|
|
||||||
[etcd:children]
|
[etcd:children]
|
||||||
kubemaster
|
kubemaster
|
||||||
kubemaster-ha
|
kubemaster-ha
|
||||||
|
|
||||||
[kube_control_plane:children]
|
[kube_control_plane:children]
|
||||||
kubemaster
|
kubemaster
|
||||||
kubemaster-ha
|
kubemaster-ha
|
||||||
|
|
||||||
[kubespray:children]
|
[kubespray:children]
|
||||||
kubernetes
|
kubernetes
|
||||||
```
|
```
|
||||||
|
|
||||||
* Last entry here needed to apply kubespray.yml config file, renamed from all.yml of kubespray project.
|
* Last entry here needed to apply kubespray.yml config file, renamed from all.yml of kubespray project.
|
||||||
|
|
||||||
11. Now you can include kubespray tasks in you existent playbooks by including cluster.yml file:
|
10. Now you can include kubespray tasks in you existent playbooks by including cluster.yml file:
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
- name: Import kubespray playbook
|
- name: Import kubespray playbook
|
||||||
ansible.builtin.import_playbook: 3d/kubespray/cluster.yml
|
ansible.builtin.import_playbook: 3d/kubespray/cluster.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
Or your could copy separate tasks from cluster.yml into your ansible repository.
|
Or you could copy separate tasks from cluster.yml into your ansible repository.
|
||||||
|
|
||||||
12. Commit changes to your ansible repo. Keep in mind, that submodule folder is just a link to the git commit hash of your forked repo.
|
11. Commit changes to your ansible repo. Keep in mind, that submodule folder is just a link to the git commit hash of your forked repo.
|
||||||
When you update your "work" branch you need to commit changes to ansible repo as well.
|
|
||||||
|
When you update your "work" branch you need to commit changes to ansible repo as well.
|
||||||
Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get actual code from submodule.
|
Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get actual code from submodule.
|
||||||
|
|
||||||
## Contributing
|
## Contributing
|
||||||
|
@ -95,37 +111,78 @@ If you made useful changes or fixed a bug in existent kubespray repo, use this f
|
||||||
2. Change working directory to git submodule directory (3d/kubespray).
|
2. Change working directory to git submodule directory (3d/kubespray).
|
||||||
|
|
||||||
3. Setup desired user.name and user.email for submodule.
|
3. Setup desired user.name and user.email for submodule.
|
||||||
If kubespray is only one submodule in your repo you could use something like:
|
|
||||||
```git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-address@used.for.cncf"'```
|
If kubespray is only one submodule in your repo you could use something like:
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-address@used.for.cncf"'
|
||||||
|
```
|
||||||
|
|
||||||
4. Sync with upstream master:
|
4. Sync with upstream master:
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
git fetch upstream
|
git fetch upstream
|
||||||
git merge upstream/master
|
git merge upstream/master
|
||||||
git push origin master
|
git push origin master
|
||||||
```
|
```
|
||||||
|
|
||||||
5. Create new branch for the specific fixes that you want to contribute:
|
5. Create new branch for the specific fixes that you want to contribute:
|
||||||
```git checkout -b fixes-name-date-index```
|
|
||||||
Branch name should be self explaining to you, adding date and/or index will help you to track/delete your old PRs.
|
```ShellSession
|
||||||
|
git checkout -b fixes-name-date-index
|
||||||
|
```
|
||||||
|
|
||||||
|
Branch name should be self explaining to you, adding date and/or index will help you to track/delete your old PRs.
|
||||||
|
|
||||||
6. Find git hash of your commit in "work" repo and apply it to newly created "fix" repo:
|
6. Find git hash of your commit in "work" repo and apply it to newly created "fix" repo:
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
git cherry-pick <COMMIT_HASH>
|
git cherry-pick <COMMIT_HASH>
|
||||||
```
|
```
|
||||||
|
|
||||||
7. If you have several temporary-stage commits - squash them using [```git rebase -i```](https://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
|
7. If you have several temporary-stage commits - squash them using [git rebase -i](https://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
|
||||||
Also you could use interactive rebase (```git rebase -i HEAD~10```) to delete commits which you don't want to contribute into original repo.
|
|
||||||
|
Also you could use interactive rebase
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
git rebase -i HEAD~10
|
||||||
|
```
|
||||||
|
|
||||||
|
to delete commits which you don't want to contribute into original repo.
|
||||||
|
|
||||||
8. When your changes is in place, you need to check upstream repo one more time because it could be changed during your work.
|
8. When your changes are in place, you need to check the upstream repo one more time because it could have changed during your work.
|
||||||
Check that you're on correct branch:
|
|
||||||
```git status```
|
|
||||||
And pull changes from upstream (if any):
|
|
||||||
```git pull --rebase upstream master```
|
|
||||||
|
|
||||||
9. Now push your changes to your **fork** repo with ```git push```. If your branch doesn't exists on github, git will propose you to use something like ```git push --set-upstream origin fixes-name-date-index```.
|
Check that you're on correct branch:
|
||||||
|
|
||||||
10. Open you forked repo in browser, on the main page you will see proposition to create pull request for your newly created branch. Check proposed diff of your PR. If something is wrong you could safely delete "fix" branch on github using ```git push origin --delete fixes-name-date-index```, ```git branch -D fixes-name-date-index``` and start whole process from the beginning.
|
```ShellSession
|
||||||
If everything is fine - add description about your changes (what they do and why they're needed) and confirm pull request creation.
|
git status
|
||||||
|
```
|
||||||
|
|
||||||
|
And pull changes from upstream (if any):
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
git pull --rebase upstream master
|
||||||
|
```
|
||||||
|
|
||||||
|
9. Now push your changes to your **fork** repo with
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
git push
|
||||||
|
```
|
||||||
|
|
||||||
|
    If your branch doesn't exist on github, git will propose you to use something like
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
git push --set-upstream origin fixes-name-date-index
|
||||||
|
```
|
||||||
|
|
||||||
|
10. Open your forked repo in the browser; on the main page you will see a proposition to create a pull request for your newly created branch. Check the proposed diff of your PR. If something is wrong you can safely delete the "fix" branch on github using
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
git push origin --delete fixes-name-date-index
|
||||||
|
git branch -D fixes-name-date-index
|
||||||
|
```
|
||||||
|
|
||||||
|
and start whole process from the beginning.
|
||||||
|
|
||||||
|
If everything is fine - add description about your changes (what they do and why they're needed) and confirm pull request creation.
|
||||||
|
|
|
@ -2,6 +2,14 @@
|
||||||
|
|
||||||
kube-vip provides Kubernetes clusters with a virtual IP and load balancer for both the control plane (for building a highly-available cluster) and Kubernetes Services of type LoadBalancer without relying on any external hardware or software.
|
kube-vip provides Kubernetes clusters with a virtual IP and load balancer for both the control plane (for building a highly-available cluster) and Kubernetes Services of type LoadBalancer without relying on any external hardware or software.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
You have to configure `kube_proxy_strict_arp` when the kube_proxy_mode is `ipvs` and kube-vip ARP is enabled.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
kube_proxy_strict_arp: true
|
||||||
|
```
|
||||||
|
|
||||||
## Install
|
## Install
|
||||||
|
|
||||||
You have to explicitly enable the kube-vip extension:
|
You have to explicitly enable the kube-vip extension:
|
||||||
|
@ -11,7 +19,7 @@ kube_vip_enabled: true
|
||||||
```
|
```
|
||||||
|
|
||||||
You also need to enable
|
You also need to enable
|
||||||
[kube-vip as HA, Load Balancer, or both](https://kube-vip.chipzoller.dev/docs/installation/static/#kube-vip-as-ha-load-balancer-or-both):
|
[kube-vip as HA, Load Balancer, or both](https://kube-vip.io/docs/installation/static/#kube-vip-as-ha-load-balancer-or-both):
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# HA for control-plane, requires a VIP
|
# HA for control-plane, requires a VIP
|
||||||
|
@ -28,16 +36,16 @@ kube_vip_services_enabled: false
|
||||||
```
|
```
|
||||||
|
|
||||||
> Note: When using `kube-vip` as LoadBalancer for services,
|
> Note: When using `kube-vip` as LoadBalancer for services,
|
||||||
[additionnal manual steps](https://kube-vip.chipzoller.dev/docs/usage/cloud-provider/)
|
[additional manual steps](https://kube-vip.io/docs/usage/cloud-provider/)
|
||||||
are needed.
|
are needed.
|
||||||
|
|
||||||
If using [ARP mode](https://kube-vip.chipzoller.dev/docs/installation/static/#arp) :
|
If using [ARP mode](https://kube-vip.io/docs/installation/static/#arp) :
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
kube_vip_arp_enabled: true
|
kube_vip_arp_enabled: true
|
||||||
```
|
```
|
||||||
|
|
||||||
If using [BGP mode](https://kube-vip.chipzoller.dev/docs/installation/static/#bgp) :
|
If using [BGP mode](https://kube-vip.io/docs/installation/static/#bgp) :
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
kube_vip_bgp_enabled: true
|
kube_vip_bgp_enabled: true
|
||||||
|
|
|
@ -1,10 +1,11 @@
|
||||||
# Local Storage Provisioner
|
# Local Static Storage Provisioner
|
||||||
|
|
||||||
The [local storage provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume)
|
The [local static storage provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner)
|
||||||
is NOT a dynamic storage provisioner as you would
|
is NOT a dynamic storage provisioner as you would
|
||||||
expect from a cloud provider. Instead, it simply creates PersistentVolumes for
|
expect from a cloud provider. Instead, it simply creates PersistentVolumes for
|
||||||
all mounts under the host_dir of the specified storage class.
|
all mounts under the `host_dir` of the specified storage class.
|
||||||
These storage classes are specified in the `local_volume_provisioner_storage_classes` nested dictionary.
|
These storage classes are specified in the `local_volume_provisioner_storage_classes` nested dictionary.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
|
@ -16,15 +17,18 @@ local_volume_provisioner_storage_classes:
|
||||||
host_dir: /mnt/fast-disks
|
host_dir: /mnt/fast-disks
|
||||||
mount_dir: /mnt/fast-disks
|
mount_dir: /mnt/fast-disks
|
||||||
block_cleaner_command:
|
block_cleaner_command:
|
||||||
- "/scripts/shred.sh"
|
- "/scripts/shred.sh"
|
||||||
- "2"
|
- "2"
|
||||||
volume_mode: Filesystem
|
volume_mode: Filesystem
|
||||||
fs_type: ext4
|
fs_type: ext4
|
||||||
```
|
```
|
||||||
|
|
||||||
For each key in `local_volume_provisioner_storage_classes` a storageClass with the
|
For each key in `local_volume_provisioner_storage_classes` a "storage class" with
|
||||||
same name is created. The subkeys of each storage class are converted to camelCase and added
|
the same name is created in the entry `storageClassMap` of the ConfigMap `local-volume-provisioner`.
|
||||||
as attributes to the storageClass.
|
The subkeys of each storage class in `local_volume_provisioner_storage_classes`
|
||||||
|
are converted to camelCase and added as attributes to the storage class in the
|
||||||
|
ConfigMap.
|
||||||
|
|
||||||
The result of the above example is:
|
The result of the above example is:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
|
@ -43,80 +47,85 @@ data:
|
||||||
fsType: ext4
|
fsType: ext4
|
||||||
```
|
```
|
||||||
|
|
||||||
The default StorageClass is local-storage on /mnt/disks,
|
Additionally, a StorageClass object (`storageclasses.storage.k8s.io`) is also
|
||||||
the rest of this doc will use that path as an example.
|
created for each storage class:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ kubectl get storageclasses.storage.k8s.io
|
||||||
|
NAME PROVISIONER RECLAIMPOLICY
|
||||||
|
fast-disks kubernetes.io/no-provisioner Delete
|
||||||
|
local-storage kubernetes.io/no-provisioner Delete
|
||||||
|
```
|
||||||
|
|
||||||
|
The default StorageClass is `local-storage` on `/mnt/disks`;
|
||||||
|
the rest of this documentation will use that path as an example.
|
||||||
|
|
||||||
## Examples to create local storage volumes
|
## Examples to create local storage volumes
|
||||||
|
|
||||||
1. tmpfs method:
|
1. Using tmpfs
|
||||||
|
|
||||||
``` bash
|
```bash
|
||||||
for vol in vol1 vol2 vol3; do
|
for vol in vol1 vol2 vol3; do
|
||||||
mkdir /mnt/disks/$vol
|
mkdir /mnt/disks/$vol
|
||||||
mount -t tmpfs -o size=5G $vol /mnt/disks/$vol
|
mount -t tmpfs -o size=5G $vol /mnt/disks/$vol
|
||||||
done
|
done
|
||||||
```
|
```
|
||||||
|
|
||||||
The tmpfs method is not recommended for production because the mount is not
|
The tmpfs method is not recommended for production because the mounts are not
|
||||||
persistent and data will be deleted on reboot.
|
persistent and data will be deleted on reboot.
|
||||||
|
|
||||||
1. Mount physical disks
|
1. Mount physical disks
|
||||||
|
|
||||||
``` bash
|
```bash
|
||||||
mkdir /mnt/disks/ssd1
|
mkdir /mnt/disks/ssd1
|
||||||
mount /dev/vdb1 /mnt/disks/ssd1
|
mount /dev/vdb1 /mnt/disks/ssd1
|
||||||
```
|
```
|
||||||
|
|
||||||
Physical disks are recommended for production environments because it offers
|
Physical disks are recommended for production environments because it offers
|
||||||
complete isolation in terms of I/O and capacity.
|
complete isolation in terms of I/O and capacity.
|
||||||
|
|
||||||
1. Mount unpartitioned physical devices
|
1. Mount unpartitioned physical devices
|
||||||
|
|
||||||
``` bash
|
```bash
|
||||||
for disk in /dev/sdc /dev/sdd /dev/sde; do
|
for disk in /dev/sdc /dev/sdd /dev/sde; do
|
||||||
ln -s $disk /mnt/disks
|
ln -s $disk /mnt/disks
|
||||||
done
|
done
|
||||||
```
|
```
|
||||||
|
|
||||||
This saves time of precreating filesystems. Note that your storageclass must have
|
This saves time of precreating filesystems. Note that your storageclass must have
|
||||||
volume_mode set to "Filesystem" and fs_type defined. If either is not set, the
|
`volume_mode` set to `"Filesystem"` and `fs_type` defined. If either is not set, the
|
||||||
disk will be added as a raw block device.
|
disk will be added as a raw block device.
|
||||||
|
|
||||||
|
1. PersistentVolumes with `volumeMode="Block"`
|
||||||
|
|
||||||
|
Just like above, you can create PersistentVolumes with volumeMode `Block`
|
||||||
|
by creating a symbolic link under discovery directory to the block device on
|
||||||
|
the node, if you set `volume_mode` to `"Block"`. This will create a volume
|
||||||
|
presented into a Pod as a block device, without any filesystem on it.
|
||||||
|
|
||||||
1. File-backed sparsefile method
|
1. File-backed sparsefile method
|
||||||
|
|
||||||
``` bash
|
```bash
|
||||||
truncate /mnt/disks/disk5 --size 2G
|
truncate /mnt/disks/disk5 --size 2G
|
||||||
mkfs.ext4 /mnt/disks/disk5
|
mkfs.ext4 /mnt/disks/disk5
|
||||||
mkdir /mnt/disks/vol5
|
mkdir /mnt/disks/vol5
|
||||||
mount /mnt/disks/disk5 /mnt/disks/vol5
|
mount /mnt/disks/disk5 /mnt/disks/vol5
|
||||||
```
|
```
|
||||||
|
|
||||||
If you have a development environment and only one disk, this is the best way
|
If you have a development environment and only one disk, this is the best way
|
||||||
to limit the quota of persistent volumes.
|
to limit the quota of persistent volumes.
|
||||||
|
|
||||||
1. Simple directories
|
1. Simple directories
|
||||||
|
|
||||||
In a development environment using `mount --bind` works also, but there is no capacity
|
In a development environment, using `mount --bind` works also, but there is no capacity
|
||||||
management.
|
management.
|
||||||
|
|
||||||
1. Block volumeMode PVs
|
|
||||||
|
|
||||||
Create a symbolic link under discovery directory to the block device on the node. To use
|
|
||||||
raw block devices in pods, volume_type should be set to "Block".
|
|
||||||
|
|
||||||
## Usage notes
|
## Usage notes
|
||||||
|
|
||||||
Beta PV.NodeAffinity field is used by default. If running against an older K8s
|
Make sure to make any mounts persist via `/etc/fstab` or with systemd mounts (for
|
||||||
version, the useAlphaAPI flag must be set in the configMap.
|
Flatcar Container Linux or Fedora CoreOS). Pods with persistent volume claims will not be
|
||||||
|
|
||||||
The volume provisioner cannot calculate volume sizes correctly, so you should
|
|
||||||
delete the daemonset pod on the relevant host after creating volumes. The pod
|
|
||||||
will be recreated and read the size correctly.
|
|
||||||
|
|
||||||
Make sure to make any mounts persist via /etc/fstab or with systemd mounts (for
|
|
||||||
Flatcar Container Linux). Pods with persistent volume claims will not be
|
|
||||||
able to start if the mounts become unavailable.
|
able to start if the mounts become unavailable.
|
||||||
|
|
||||||
## Further reading
|
## Further reading
|
||||||
|
|
||||||
Refer to the upstream docs here: <https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume>
|
Refer to the upstream docs here: <https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner>
|
||||||
|
|
|
@ -29,8 +29,7 @@ use Kubernetes's `PersistentVolume` abstraction. The following template is
|
||||||
expanded by `salt` in the GCE cluster turnup, but can easily be adapted to
|
expanded by `salt` in the GCE cluster turnup, but can easily be adapted to
|
||||||
other situations:
|
other situations:
|
||||||
|
|
||||||
<!-- BEGIN MUNGE: EXAMPLE registry-pv.yaml.in -->
|
```yaml
|
||||||
``` yaml
|
|
||||||
kind: PersistentVolume
|
kind: PersistentVolume
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
metadata:
|
metadata:
|
||||||
|
@ -46,7 +45,6 @@ spec:
|
||||||
fsType: "ext4"
|
fsType: "ext4"
|
||||||
{% endif %}
|
{% endif %}
|
||||||
```
|
```
|
||||||
<!-- END MUNGE: EXAMPLE registry-pv.yaml.in -->
|
|
||||||
|
|
||||||
If, for example, you wanted to use NFS you would just need to change the
|
If, for example, you wanted to use NFS you would just need to change the
|
||||||
`gcePersistentDisk` block to `nfs`. See
|
`gcePersistentDisk` block to `nfs`. See
|
||||||
|
@ -68,8 +66,7 @@ Now that the Kubernetes cluster knows that some storage exists, you can put a
|
||||||
claim on that storage. As with the `PersistentVolume` above, you can start
|
claim on that storage. As with the `PersistentVolume` above, you can start
|
||||||
with the `salt` template:
|
with the `salt` template:
|
||||||
|
|
||||||
<!-- BEGIN MUNGE: EXAMPLE registry-pvc.yaml.in -->
|
```yaml
|
||||||
``` yaml
|
|
||||||
kind: PersistentVolumeClaim
|
kind: PersistentVolumeClaim
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
metadata:
|
metadata:
|
||||||
|
@ -82,7 +79,6 @@ spec:
|
||||||
requests:
|
requests:
|
||||||
storage: {{ pillar['cluster_registry_disk_size'] }}
|
storage: {{ pillar['cluster_registry_disk_size'] }}
|
||||||
```
|
```
|
||||||
<!-- END MUNGE: EXAMPLE registry-pvc.yaml.in -->
|
|
||||||
|
|
||||||
This tells Kubernetes that you want to use storage, and the `PersistentVolume`
|
This tells Kubernetes that you want to use storage, and the `PersistentVolume`
|
||||||
you created before will be bound to this claim (unless you have other
|
you created before will be bound to this claim (unless you have other
|
||||||
|
@ -93,8 +89,7 @@ gives you the right to use this storage until you release the claim.
|
||||||
|
|
||||||
Now we can run a Docker registry:
|
Now we can run a Docker registry:
|
||||||
|
|
||||||
<!-- BEGIN MUNGE: EXAMPLE registry-rc.yaml -->
|
```yaml
|
||||||
``` yaml
|
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: ReplicationController
|
kind: ReplicationController
|
||||||
metadata:
|
metadata:
|
||||||
|
@ -138,7 +133,6 @@ spec:
|
||||||
persistentVolumeClaim:
|
persistentVolumeClaim:
|
||||||
claimName: kube-registry-pvc
|
claimName: kube-registry-pvc
|
||||||
```
|
```
|
||||||
<!-- END MUNGE: EXAMPLE registry-rc.yaml -->
|
|
||||||
|
|
||||||
*Note:* if you have set multiple replicas, make sure your CSI driver has support for the `ReadWriteMany` accessMode.
|
*Note:* if you have set multiple replicas, make sure your CSI driver has support for the `ReadWriteMany` accessMode.
|
||||||
|
|
||||||
|
@ -146,8 +140,7 @@ spec:
|
||||||
|
|
||||||
Now that we have a registry `Pod` running, we can expose it as a Service:
|
Now that we have a registry `Pod` running, we can expose it as a Service:
|
||||||
|
|
||||||
<!-- BEGIN MUNGE: EXAMPLE registry-svc.yaml -->
|
```yaml
|
||||||
``` yaml
|
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: Service
|
kind: Service
|
||||||
metadata:
|
metadata:
|
||||||
|
@ -164,7 +157,6 @@ spec:
|
||||||
port: 5000
|
port: 5000
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
```
|
```
|
||||||
<!-- END MUNGE: EXAMPLE registry-svc.yaml -->
|
|
||||||
|
|
||||||
## Expose the registry on each node
|
## Expose the registry on each node
|
||||||
|
|
||||||
|
@ -172,8 +164,7 @@ Now that we have a running `Service`, we need to expose it onto each Kubernetes
|
||||||
`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every
|
`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every
|
||||||
node by creating the following daemonset.
|
node by creating the following daemonset.
|
||||||
|
|
||||||
<!-- BEGIN MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
|
```yaml
|
||||||
``` yaml
|
|
||||||
apiVersion: apps/v1
|
apiVersion: apps/v1
|
||||||
kind: DaemonSet
|
kind: DaemonSet
|
||||||
metadata:
|
metadata:
|
||||||
|
@ -207,7 +198,6 @@ spec:
|
||||||
containerPort: 80
|
containerPort: 80
|
||||||
hostPort: 5000
|
hostPort: 5000
|
||||||
```
|
```
|
||||||
<!-- END MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
|
|
||||||
|
|
||||||
When modifying replication-controller, service and daemon-set definitions, take
|
When modifying replication-controller, service and daemon-set definitions, take
|
||||||
care to ensure *unique* identifiers for the rc-svc couple and the daemon-set.
|
care to ensure *unique* identifiers for the rc-svc couple and the daemon-set.
|
||||||
|
@ -219,7 +209,7 @@ This ensures that port 5000 on each node is directed to the registry `Service`.
|
||||||
You should be able to verify that it is running by hitting port 5000 with a web
|
You should be able to verify that it is running by hitting port 5000 with a web
|
||||||
browser and getting a 404 error:
|
browser and getting a 404 error:
|
||||||
|
|
||||||
``` console
|
```ShellSession
|
||||||
$ curl localhost:5000
|
$ curl localhost:5000
|
||||||
404 page not found
|
404 page not found
|
||||||
```
|
```
|
||||||
|
@ -229,7 +219,7 @@ $ curl localhost:5000
|
||||||
To use an image hosted by this registry, simply say this in your `Pod`'s
|
To use an image hosted by this registry, simply say this in your `Pod`'s
|
||||||
`spec.containers[].image` field:
|
`spec.containers[].image` field:
|
||||||
|
|
||||||
``` yaml
|
```yaml
|
||||||
image: localhost:5000/user/container
|
image: localhost:5000/user/container
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -241,7 +231,7 @@ building locally and want to push to your cluster.
|
||||||
You can use `kubectl` to set up a port-forward from your local node to a
|
You can use `kubectl` to set up a port-forward from your local node to a
|
||||||
running Pod:
|
running Pod:
|
||||||
|
|
||||||
``` console
|
```ShellSession
|
||||||
$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=registry \
|
$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=registry \
|
||||||
-o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
|
-o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
|
||||||
| grep Running | head -1 | cut -f1 -d' ')
|
| grep Running | head -1 | cut -f1 -d' ')
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
|
|
||||||
MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation.
|
MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation.
|
||||||
It allows you to create Kubernetes services of type "LoadBalancer" in clusters that don't run on a cloud provider, and thus cannot simply hook into 3rd party products to provide load-balancers.
|
It allows you to create Kubernetes services of type "LoadBalancer" in clusters that don't run on a cloud provider, and thus cannot simply hook into 3rd party products to provide load-balancers.
|
||||||
The default operationg mode of MetalLB is in ["Layer2"](https://metallb.universe.tf/concepts/layer2/) but it can also operate in ["BGP"](https://metallb.universe.tf/concepts/bgp/) mode.
|
The default operating mode of MetalLB is in ["Layer2"](https://metallb.universe.tf/concepts/layer2/) but it can also operate in ["BGP"](https://metallb.universe.tf/concepts/bgp/) mode.
|
||||||
|
|
||||||
## Prerequisites
|
## Prerequisites
|
||||||
|
|
||||||
|
@ -19,6 +19,7 @@ You have to explicitly enable the MetalLB extension and set an IP address range
|
||||||
```yaml
|
```yaml
|
||||||
metallb_enabled: true
|
metallb_enabled: true
|
||||||
metallb_speaker_enabled: true
|
metallb_speaker_enabled: true
|
||||||
|
metallb_avoid_buggy_ips: true
|
||||||
metallb_ip_range:
|
metallb_ip_range:
|
||||||
- 10.5.0.0/16
|
- 10.5.0.0/16
|
||||||
```
|
```
|
||||||
|
@ -69,16 +70,17 @@ metallb_peers:
|
||||||
|
|
||||||
When using calico >= 3.18 you can replace MetalLB speaker by calico Service LoadBalancer IP advertisement.
|
When using calico >= 3.18 you can replace MetalLB speaker by calico Service LoadBalancer IP advertisement.
|
||||||
See [calico service IPs advertisement documentation](https://docs.projectcalico.org/archive/v3.18/networking/advertise-service-ips#advertise-service-load-balancer-ip-addresses).
|
See [calico service IPs advertisement documentation](https://docs.projectcalico.org/archive/v3.18/networking/advertise-service-ips#advertise-service-load-balancer-ip-addresses).
|
||||||
In this scenarion you should disable the MetalLB speaker and configure the `calico_advertise_service_loadbalancer_ips` to match your `metallb_ip_range`
|
In this scenario you should disable the MetalLB speaker and configure the `calico_advertise_service_loadbalancer_ips` to match your `metallb_ip_range`
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
metallb_speaker_enabled: false
|
metallb_speaker_enabled: false
|
||||||
|
metallb_avoid_buggy_ips: true
|
||||||
metallb_ip_range:
|
metallb_ip_range:
|
||||||
- 10.5.0.0/16
|
- 10.5.0.0/16
|
||||||
calico_advertise_service_loadbalancer_ips: "{{ metallb_ip_range }}"
|
calico_advertise_service_loadbalancer_ips: "{{ metallb_ip_range }}"
|
||||||
```
|
```
|
||||||
|
|
||||||
If you have additional loadbalancer IP pool in `metallb_additional_address_pools`, ensure to add them to the list.
|
If you have additional loadbalancer IP pool in `metallb_additional_address_pools` , ensure to add them to the list.
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
metallb_speaker_enabled: false
|
metallb_speaker_enabled: false
|
||||||
|
@ -90,11 +92,13 @@ metallb_additional_address_pools:
|
||||||
- 10.6.0.0/16
|
- 10.6.0.0/16
|
||||||
protocol: "bgp"
|
protocol: "bgp"
|
||||||
auto_assign: false
|
auto_assign: false
|
||||||
|
avoid_buggy_ips: true
|
||||||
kube_service_pool_2:
|
kube_service_pool_2:
|
||||||
ip_range:
|
ip_range:
|
||||||
- 10.10.0.0/16
|
- 10.10.0.0/16
|
||||||
protocol: "bgp"
|
protocol: "bgp"
|
||||||
auto_assign: false
|
auto_assign: false
|
||||||
|
avoid_buggy_ips: true
|
||||||
calico_advertise_service_loadbalancer_ips:
|
calico_advertise_service_loadbalancer_ips:
|
||||||
- 10.5.0.0/16
|
- 10.5.0.0/16
|
||||||
- 10.6.0.0/16
|
- 10.6.0.0/16
|
||||||
|
|
66
docs/mirror.md
Normal file
66
docs/mirror.md
Normal file
|
@ -0,0 +1,66 @@
|
||||||
|
# Public Download Mirror
|
||||||
|
|
||||||
|
The public mirror is useful to make the public resources download quickly in some areas of the world. (such as China).
|
||||||
|
|
||||||
|
## Configuring Kubespray to use a mirror site
|
||||||
|
|
||||||
|
You can follow the [offline](offline-environment.md) guide to configure the image/file download settings to use the public mirror site. If you want to download quickly in China, the configuration can be like:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
gcr_image_repo: "gcr.m.daocloud.io"
|
||||||
|
kube_image_repo: "k8s.m.daocloud.io"
|
||||||
|
docker_image_repo: "docker.m.daocloud.io"
|
||||||
|
quay_image_repo: "quay.m.daocloud.io"
|
||||||
|
github_image_repo: "ghcr.m.daocloud.io"
|
||||||
|
|
||||||
|
files_repo: "https://files.m.daocloud.io"
|
||||||
|
```
|
||||||
|
|
||||||
|
Use mirror sites only if you trust the provider. The Kubespray team cannot verify their reliability or security.
|
||||||
|
You can replace the `m.daocloud.io` with any site you want.
|
||||||
|
|
||||||
|
## Example Usage Full Steps
|
||||||
|
|
||||||
|
You can follow the full steps to use Kubespray with the mirror. For example:
|
||||||
|
|
||||||
|
Install Ansible according to Ansible installation guide then run the following steps:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
# Copy ``inventory/sample`` as ``inventory/mycluster``
|
||||||
|
cp -rfp inventory/sample inventory/mycluster
|
||||||
|
|
||||||
|
# Update Ansible inventory file with inventory builder
|
||||||
|
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
|
||||||
|
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
|
||||||
|
|
||||||
|
# Use the download mirror
|
||||||
|
cp inventory/mycluster/group_vars/all/offline.yml inventory/mycluster/group_vars/all/mirror.yml
|
||||||
|
sed -i -E '/# .*\{\{ files_repo/s/^# //g' inventory/mycluster/group_vars/all/mirror.yml
|
||||||
|
tee -a inventory/mycluster/group_vars/all/mirror.yml <<EOF
|
||||||
|
gcr_image_repo: "gcr.m.daocloud.io"
|
||||||
|
kube_image_repo: "k8s.m.daocloud.io"
|
||||||
|
docker_image_repo: "docker.m.daocloud.io"
|
||||||
|
quay_image_repo: "quay.m.daocloud.io"
|
||||||
|
github_image_repo: "ghcr.m.daocloud.io"
|
||||||
|
files_repo: "https://files.m.daocloud.io"
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Review and change parameters under ``inventory/mycluster/group_vars``
|
||||||
|
cat inventory/mycluster/group_vars/all/all.yml
|
||||||
|
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
|
||||||
|
|
||||||
|
# Deploy Kubespray with Ansible Playbook - run the playbook as root
|
||||||
|
# The option `--become` is required, as for example writing SSL keys in /etc/,
|
||||||
|
# installing packages and interacting with various systemd daemons.
|
||||||
|
# Without --become the playbook will fail to run!
|
||||||
|
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
The above steps are the same as the [README.md](../README.md) steps, with the "Use the download mirror" step added.
|
||||||
|
|
||||||
|
## Community-run mirror sites
|
||||||
|
|
||||||
|
DaoCloud(China)
|
||||||
|
|
||||||
|
* [image-mirror](https://github.com/DaoCloud/public-image-mirror)
|
||||||
|
* [files-mirror](https://github.com/DaoCloud/public-binary-files-mirror)
|
|
@ -124,7 +124,7 @@ to
|
||||||
With the old node still in the inventory, run `remove-node.yml`. You need to pass `-e node=node-1` to the playbook to limit the execution to the node being removed.
|
With the old node still in the inventory, run `remove-node.yml`. You need to pass `-e node=node-1` to the playbook to limit the execution to the node being removed.
|
||||||
If the node you want to remove is not online, you should add `reset_nodes=false` and `allow_ungraceful_removal=true` to your extra-vars.
|
If the node you want to remove is not online, you should add `reset_nodes=false` and `allow_ungraceful_removal=true` to your extra-vars.
|
||||||
|
|
||||||
### 3) Edit cluster-info configmap in kube-system namespace
|
### 3) Edit cluster-info configmap in kube-public namespace
|
||||||
|
|
||||||
`kubectl edit cm -n kube-public cluster-info`
|
`kubectl edit cm -n kube-public cluster-info`
|
||||||
|
|
||||||
|
|
11
docs/ntp.md
11
docs/ntp.md
|
@ -12,7 +12,7 @@ ntp_enabled: true
|
||||||
|
|
||||||
The NTP service would be enabled and sync time automatically.
|
The NTP service would be enabled and sync time automatically.
|
||||||
|
|
||||||
## Custimize the NTP configure file
|
## Customize the NTP configure file
|
||||||
|
|
||||||
In the Air-Gap environment, the node cannot access the NTP server by internet. So the node can use the customized ntp server by configuring ntp file.
|
In the Air-Gap environment, the node cannot access the NTP server by internet. So the node can use the customized ntp server by configuring ntp file.
|
||||||
|
|
||||||
|
@ -26,6 +26,15 @@ ntp_servers:
|
||||||
- "3.your-ntp-server.org iburst"
|
- "3.your-ntp-server.org iburst"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Setting the TimeZone
|
||||||
|
|
||||||
|
The timezone can also be set with the `ntp_timezone` variable, e.g. "Etc/UTC", "Asia/Shanghai". If not set, the timezone will not change.
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
ntp_enabled: true
|
||||||
|
ntp_timezone: Etc/UTC
|
||||||
|
```
|
||||||
|
|
||||||
## Advanced Configure
|
## Advanced Configure
|
||||||
|
|
||||||
Enabling `tinker panic` is useful when running NTP in a VM environment to avoid clock drift on VMs. It only takes effect when ntp_manage_config is true.
|
Enabling `tinker panic` is useful when running NTP in a VM environment to avoid clock drift on VMs. It only takes effect when ntp_manage_config is true.
|
||||||
|
|
|
@ -1,12 +1,25 @@
|
||||||
# Offline environment
|
# Offline environment
|
||||||
|
|
||||||
In case your servers don't have access to internet (for example when deploying on premises with security constraints), you need to setup:
|
In case your servers don't have access to the internet directly (for example
|
||||||
|
when deploying on premises with security constraints), you need to get the
|
||||||
|
following artifacts in advance from another environment which has access to the internet.
|
||||||
|
|
||||||
|
* Some static files (zips and binaries)
|
||||||
|
* OS packages (rpm/deb files)
|
||||||
|
* Container images used by Kubespray. Exhaustive list depends on your setup
|
||||||
|
* [Optional] Python packages used by Kubespray (only required if your OS doesn't provide all python packages/versions listed in `requirements.txt`)
|
||||||
|
* [Optional] Helm chart files (only required if `helm_enabled=true`)
|
||||||
|
|
||||||
|
Then you need to setup the following services on your offline environment:
|
||||||
|
|
||||||
* a HTTP reverse proxy/cache/mirror to serve some static files (zips and binaries)
|
* a HTTP reverse proxy/cache/mirror to serve some static files (zips and binaries)
|
||||||
* an internal Yum/Deb repository for OS packages
|
* an internal Yum/Deb repository for OS packages
|
||||||
* an internal container image registry that need to be populated with all container images used by Kubespray. Exhaustive list depends on your setup
|
* an internal container image registry that needs to be populated with all container images used by Kubespray
|
||||||
* [Optional] an internal PyPi server for kubespray python packages (only required if your OS doesn't provide all python packages/versions listed in `requirements.txt`)
|
* [Optional] an internal PyPi server for python packages used by Kubespray
|
||||||
* [Optional] an internal Helm registry (only required if `helm_enabled=true`)
|
* [Optional] an internal Helm registry for Helm chart files
|
||||||
|
|
||||||
|
You can get artifact lists with [generate_list.sh](/contrib/offline/generate_list.sh) script.
|
||||||
|
In addition, you can find some tools for offline deployment under [contrib/offline](/contrib/offline/README.md).
|
||||||
|
|
||||||
## Configure Inventory
|
## Configure Inventory
|
||||||
|
|
||||||
|
@ -23,7 +36,7 @@ kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
|
||||||
kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
|
kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
|
||||||
kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"
|
kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"
|
||||||
# etcd is optional if you **DON'T** use etcd_deployment=host
|
# etcd is optional if you **DON'T** use etcd_deployment=host
|
||||||
etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
|
etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
|
||||||
cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
|
cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
|
||||||
crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
|
crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
|
||||||
# If using Calico
|
# If using Calico
|
||||||
|
|
11
docs/openeuler.md
Normal file
11
docs/openeuler.md
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
# OpenEuler
|
||||||
|
|
||||||
|
[OpenEuler](https://www.openeuler.org/en/) Linux is supported with docker and containerd runtimes.
|
||||||
|
|
||||||
|
**Note:** OpenEuler Linux is not currently covered in kubespray CI and
|
||||||
|
support for it is currently considered experimental.
|
||||||
|
|
||||||
|
At present, only `openEuler 22.03 LTS` has been adapted, which can support the deployment of aarch64 and x86_64 platforms.
|
||||||
|
|
||||||
|
There are no special considerations for using OpenEuler Linux as the target OS
|
||||||
|
for Kubespray deployments.
|
|
@ -34,52 +34,6 @@ Otherwise [cinder](https://wiki.openstack.org/wiki/Cinder) won't work as expecte
|
||||||
|
|
||||||
Unless you are using calico or kube-router you can now run the playbook.
|
Unless you are using calico or kube-router you can now run the playbook.
|
||||||
|
|
||||||
**Additional step needed when using calico or kube-router:**
|
|
||||||
|
|
||||||
Being L3 CNI, calico and kube-router do not encapsulate all packages with the hosts' ip addresses. Instead the packets will be routed with the PODs ip addresses directly.
|
|
||||||
|
|
||||||
OpenStack will filter and drop all packets from ips it does not know to prevent spoofing.
|
|
||||||
|
|
||||||
In order to make L3 CNIs work on OpenStack you will need to tell OpenStack to allow pods packets by allowing the network they use.
|
|
||||||
|
|
||||||
First you will need the ids of your OpenStack instances that will run kubernetes:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
openstack server list --project YOUR_PROJECT
|
|
||||||
+--------------------------------------+--------+----------------------------------+--------+-------------+
|
|
||||||
| ID | Name | Tenant ID | Status | Power State |
|
|
||||||
+--------------------------------------+--------+----------------------------------+--------+-------------+
|
|
||||||
| e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
|
|
||||||
| 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
|
|
||||||
```
|
|
||||||
|
|
||||||
Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (though they are now configured through using OpenStack):
|
|
||||||
|
|
||||||
```bash
|
|
||||||
openstack port list -c id -c device_id --project YOUR_PROJECT
|
|
||||||
+--------------------------------------+--------------------------------------+
|
|
||||||
| id | device_id |
|
|
||||||
+--------------------------------------+--------------------------------------+
|
|
||||||
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
|
|
||||||
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
|
|
||||||
```
|
|
||||||
|
|
||||||
Given the port ids on the left, you can set the two `allowed-address`(es) in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`.)
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# allow kube_service_addresses and kube_pods_subnet network
|
|
||||||
openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
|
|
||||||
openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
|
|
||||||
```
|
|
||||||
|
|
||||||
If all the VMs in the tenant correspond to Kubespray deployment, you can "sweep run" above with:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
|
|
||||||
```
|
|
||||||
|
|
||||||
Now you can finally run the playbook.
|
|
||||||
|
|
||||||
## The external cloud provider
|
## The external cloud provider
|
||||||
|
|
||||||
The in-tree cloud provider is deprecated and will be removed in a future version of Kubernetes. The target release for removing all remaining in-tree cloud providers is set to 1.21.
|
The in-tree cloud provider is deprecated and will be removed in a future version of Kubernetes. The target release for removing all remaining in-tree cloud providers is set to 1.21.
|
||||||
|
@ -156,3 +110,49 @@ The new cloud provider is configured to have Octavia by default in Kubespray.
|
||||||
|
|
||||||
- Run `source path/to/your/openstack-rc` to read your OpenStack credentials like `OS_AUTH_URL`, `OS_USERNAME`, `OS_PASSWORD`, etc. Those variables are used for accessing OpenStack from the external cloud provider.
|
- Run `source path/to/your/openstack-rc` to read your OpenStack credentials like `OS_AUTH_URL`, `OS_USERNAME`, `OS_PASSWORD`, etc. Those variables are used for accessing OpenStack from the external cloud provider.
|
||||||
- Run the `cluster.yml` playbook
|
- Run the `cluster.yml` playbook
|
||||||
|
|
||||||
|
## Additional step needed when using calico or kube-router
|
||||||
|
|
||||||
|
Being L3 CNIs, calico and kube-router do not encapsulate all packets with the hosts' ip addresses. Instead the packets will be routed with the PODs ip addresses directly.
|
||||||
|
|
||||||
|
OpenStack will filter and drop all packets from ips it does not know to prevent spoofing.
|
||||||
|
|
||||||
|
In order to make L3 CNIs work on OpenStack you will need to tell OpenStack to allow pods packets by allowing the network they use.
|
||||||
|
|
||||||
|
First you will need the ids of your OpenStack instances that will run kubernetes:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
openstack server list --project YOUR_PROJECT
|
||||||
|
+--------------------------------------+--------+----------------------------------+--------+-------------+
|
||||||
|
| ID | Name | Tenant ID | Status | Power State |
|
||||||
|
+--------------------------------------+--------+----------------------------------+--------+-------------+
|
||||||
|
| e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
|
||||||
|
| 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
|
||||||
|
```
|
||||||
|
|
||||||
|
Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (though they are now configured through using OpenStack):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
openstack port list -c id -c device_id --project YOUR_PROJECT
|
||||||
|
+--------------------------------------+--------------------------------------+
|
||||||
|
| id | device_id |
|
||||||
|
+--------------------------------------+--------------------------------------+
|
||||||
|
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
|
||||||
|
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
|
||||||
|
```
|
||||||
|
|
||||||
|
Given the port ids on the left, you can set the two `allowed-address`(es) in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`.)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# allow kube_service_addresses and kube_pods_subnet network
|
||||||
|
openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
|
||||||
|
openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
|
||||||
|
```
|
||||||
|
|
||||||
|
If all the VMs in the tenant correspond to Kubespray deployment, you can "sweep run" above with:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
|
||||||
|
```
|
||||||
|
|
||||||
|
Now you can finally run the playbook.
|
||||||
|
|
|
@ -14,8 +14,8 @@ hands-on guide to get started with Kubespray.
|
||||||
|
|
||||||
## Cluster Details
|
## Cluster Details
|
||||||
|
|
||||||
* [kubespray](https://github.com/kubernetes-sigs/kubespray) v2.17.x
|
* [kubespray](https://github.com/kubernetes-sigs/kubespray)
|
||||||
* [kubernetes](https://github.com/kubernetes/kubernetes) v1.17.9
|
* [kubernetes](https://github.com/kubernetes/kubernetes)
|
||||||
|
|
||||||
## Prerequisites
|
## Prerequisites
|
||||||
|
|
||||||
|
@ -252,11 +252,7 @@ Ansible will now execute the playbook, this can take up to 20 minutes.
|
||||||
We will leverage a kubeconfig file from one of the controller nodes to access
|
We will leverage a kubeconfig file from one of the controller nodes to access
|
||||||
the cluster as administrator from our local workstation.
|
the cluster as administrator from our local workstation.
|
||||||
|
|
||||||
> In this simplified set-up, we did not include a load balancer that usually
|
> In this simplified set-up, we did not include a load balancer that usually sits on top of the three controller nodes for a high available API server endpoint. In this simplified tutorial we connect directly to one of the three controllers.
|
||||||
sits on top of the
|
|
||||||
three controller nodes for a high available API server endpoint. In this
|
|
||||||
simplified tutorial we connect directly to one of the three
|
|
||||||
controllers.
|
|
||||||
|
|
||||||
First, we need to edit the permission of the kubeconfig file on one of the
|
First, we need to edit the permission of the kubeconfig file on one of the
|
||||||
controller nodes:
|
controller nodes:
|
||||||
|
@ -470,7 +466,7 @@ kubectl logs $POD_NAME
|
||||||
|
|
||||||
#### Exec
|
#### Exec
|
||||||
|
|
||||||
In this section you will verify the ability to [execute commands in a container](https://kubernetes.io/docs/tasks/debug-application-cluster/get-shell-running-container/#running-individual-commands-in-a-container).
|
In this section you will verify the ability to [execute commands in a container](https://kubernetes.io/docs/tasks/debug/debug-application/get-shell-running-container/#running-individual-commands-in-a-container).
|
||||||
|
|
||||||
Print the nginx version by executing the `nginx -v` command in the `nginx` container:
|
Print the nginx version by executing the `nginx -v` command in the `nginx` container:
|
||||||
|
|
||||||
|
|
9
docs/uoslinux.md
Normal file
9
docs/uoslinux.md
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
# UOS Linux
|
||||||
|
|
||||||
|
UOS Linux(UnionTech OS Server 20) is supported with docker and containerd runtimes.
|
||||||
|
|
||||||
|
**Note:** UOS Linux is not currently covered in kubespray CI and
|
||||||
|
support for it is currently considered experimental.
|
||||||
|
|
||||||
|
There are no special considerations for using UOS Linux as the target OS
|
||||||
|
for Kubespray deployments.
|
|
@ -58,7 +58,7 @@ see [download documentation](/docs/downloads.md).
|
||||||
|
|
||||||
The following is an example of setting up and running kubespray using `vagrant`.
|
The following is an example of setting up and running kubespray using `vagrant`.
|
||||||
For repeated runs, you could save the script to a file in the root of the
|
For repeated runs, you could save the script to a file in the root of the
|
||||||
kubespray and run it by executing 'source <name_of_the_file>.
|
kubespray and run it by executing `source <name_of_the_file>`.
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
# use virtualenv to install all python requirements
|
# use virtualenv to install all python requirements
|
||||||
|
|
34
docs/vars.md
34
docs/vars.md
|
@ -15,7 +15,7 @@ Some variables of note include:
|
||||||
|
|
||||||
* *calico_version* - Specify version of Calico to use
|
* *calico_version* - Specify version of Calico to use
|
||||||
* *calico_cni_version* - Specify version of Calico CNI plugin to use
|
* *calico_cni_version* - Specify version of Calico CNI plugin to use
|
||||||
* *docker_version* - Specify version of Docker to used (should be quoted
|
* *docker_version* - Specify version of Docker to use (should be quoted
|
||||||
string). Must match one of the keys defined for *docker_versioned_pkg*
|
string). Must match one of the keys defined for *docker_versioned_pkg*
|
||||||
in `roles/container-engine/docker/vars/*.yml`.
|
in `roles/container-engine/docker/vars/*.yml`.
|
||||||
* *containerd_version* - Specify version of containerd to use when setting `container_manager` to `containerd`
|
* *containerd_version* - Specify version of containerd to use when setting `container_manager` to `containerd`
|
||||||
|
@ -28,6 +28,7 @@ Some variables of note include:
|
||||||
* *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
|
* *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
|
||||||
* *kube_version* - Specify a given Kubernetes version
|
* *kube_version* - Specify a given Kubernetes version
|
||||||
* *searchdomains* - Array of DNS domains to search when looking up hostnames
|
* *searchdomains* - Array of DNS domains to search when looking up hostnames
|
||||||
|
* *remove_default_searchdomains* - Boolean that removes the default searchdomain
|
||||||
* *nameservers* - Array of nameservers to use for DNS lookup
|
* *nameservers* - Array of nameservers to use for DNS lookup
|
||||||
* *preinstall_selinux_state* - Set selinux state, permitted values are permissive, enforcing and disabled.
|
* *preinstall_selinux_state* - Set selinux state, permitted values are permissive, enforcing and disabled.
|
||||||
|
|
||||||
|
@ -81,7 +82,7 @@ following default cluster parameters:
|
||||||
raise an assertion in playbooks if the `kubelet_max_pods` var also isn't adjusted accordingly
|
raise an assertion in playbooks if the `kubelet_max_pods` var also isn't adjusted accordingly
|
||||||
(assertion not applicable to calico which doesn't use this as a hard limit, see
|
(assertion not applicable to calico which doesn't use this as a hard limit, see
|
||||||
[Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes).
|
[Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes).
|
||||||
|
|
||||||
* *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services.
|
* *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services.
|
||||||
|
|
||||||
* *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``.
|
* *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``.
|
||||||
|
@ -99,7 +100,7 @@ following default cluster parameters:
|
||||||
|
|
||||||
* *coredns_k8s_external_zone* - Zone that will be used when CoreDNS k8s_external plugin is enabled
|
* *coredns_k8s_external_zone* - Zone that will be used when CoreDNS k8s_external plugin is enabled
|
||||||
(default is k8s_external.local)
|
(default is k8s_external.local)
|
||||||
|
|
||||||
* *enable_coredns_k8s_endpoint_pod_names* - If enabled, it configures endpoint_pod_names option for kubernetes plugin.
|
* *enable_coredns_k8s_endpoint_pod_names* - If enabled, it configures endpoint_pod_names option for kubernetes plugin.
|
||||||
on the CoreDNS service.
|
on the CoreDNS service.
|
||||||
|
|
||||||
|
@ -166,7 +167,9 @@ variables to match your requirements.
|
||||||
addition to Kubespray deployed DNS
|
addition to Kubespray deployed DNS
|
||||||
* *nameservers* - Array of DNS servers configured for use by hosts
|
* *nameservers* - Array of DNS servers configured for use by hosts
|
||||||
* *searchdomains* - Array of up to 4 search domains
|
* *searchdomains* - Array of up to 4 search domains
|
||||||
|
* *remove_default_searchdomains* - Boolean. If enabled, `searchdomains` variable can hold 6 search domains.
|
||||||
* *dns_etchosts* - Content of hosts file for coredns and nodelocaldns
|
* *dns_etchosts* - Content of hosts file for coredns and nodelocaldns
|
||||||
|
* *dns_upstream_forward_extra_opts* - Options to add in the forward section of coredns/nodelocaldns related to upstream DNS servers
|
||||||
|
|
||||||
For more information, see [DNS
|
For more information, see [DNS
|
||||||
Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.md).
|
Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.md).
|
||||||
|
@ -175,26 +178,47 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
|
||||||
|
|
||||||
* *docker_options* - Commonly used to set
|
* *docker_options* - Commonly used to set
|
||||||
``--insecure-registry=myregistry.mydomain:5000``
|
``--insecure-registry=myregistry.mydomain:5000``
|
||||||
|
|
||||||
* *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install.
|
* *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install.
|
||||||
|
|
||||||
* *containerd_default_runtime* - If defined, changes the default Containerd runtime used by the Kubernetes CRI plugin.
|
* *containerd_default_runtime* - If defined, changes the default Containerd runtime used by the Kubernetes CRI plugin.
|
||||||
|
|
||||||
* *containerd_additional_runtimes* - Sets the additional Containerd runtimes used by the Kubernetes CRI plugin.
|
* *containerd_additional_runtimes* - Sets the additional Containerd runtimes used by the Kubernetes CRI plugin.
|
||||||
[Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overriden in inventory vars.
|
[Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overridden in inventory vars.
|
||||||
|
|
||||||
* *http_proxy/https_proxy/no_proxy/no_proxy_exclude_workers/additional_no_proxy* - Proxy variables for deploying behind a
|
* *http_proxy/https_proxy/no_proxy/no_proxy_exclude_workers/additional_no_proxy* - Proxy variables for deploying behind a
|
||||||
proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
|
proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
|
||||||
that correspond to each node.
|
that correspond to each node.
|
||||||
|
|
||||||
* *kubelet_cgroup_driver* - Allows manual override of the cgroup-driver option for Kubelet.
|
* *kubelet_cgroup_driver* - Allows manual override of the cgroup-driver option for Kubelet.
|
||||||
By default autodetection is used to match container manager configuration.
|
By default autodetection is used to match container manager configuration.
|
||||||
`systemd` is the preferred driver for `containerd` though it can have issues with `cgroups v1` and `kata-containers` in which case you may want to change to `cgroupfs`.
|
`systemd` is the preferred driver for `containerd` though it can have issues with `cgroups v1` and `kata-containers` in which case you may want to change to `cgroupfs`.
|
||||||
|
|
||||||
* *kubelet_rotate_certificates* - Auto rotate the kubelet client certificates by requesting new certificates
|
* *kubelet_rotate_certificates* - Auto rotate the kubelet client certificates by requesting new certificates
|
||||||
from the kube-apiserver when the certificate expiration approaches.
|
from the kube-apiserver when the certificate expiration approaches.
|
||||||
|
|
||||||
* *kubelet_rotate_server_certificates* - Auto rotate the kubelet server certificates by requesting new certificates
|
* *kubelet_rotate_server_certificates* - Auto rotate the kubelet server certificates by requesting new certificates
|
||||||
from the kube-apiserver when the certificate expiration approaches.
|
from the kube-apiserver when the certificate expiration approaches.
|
||||||
**Note** that server certificates are **not** approved automatically. Approve them manually
|
**Note** that server certificates are **not** approved automatically. Approve them manually
|
||||||
(`kubectl get csr`, `kubectl certificate approve`) or implement custom approving controller like
|
(`kubectl get csr`, `kubectl certificate approve`) or implement custom approving controller like
|
||||||
[kubelet-rubber-stamp](https://github.com/kontena/kubelet-rubber-stamp).
|
[kubelet-rubber-stamp](https://github.com/kontena/kubelet-rubber-stamp).
|
||||||
|
|
||||||
* *kubelet_streaming_connection_idle_timeout* - Set the maximum time a streaming connection can be idle before the connection is automatically closed.
|
* *kubelet_streaming_connection_idle_timeout* - Set the maximum time a streaming connection can be idle before the connection is automatically closed.
|
||||||
|
|
||||||
* *kubelet_make_iptables_util_chains* - If `true`, causes the kubelet ensures a set of `iptables` rules are present on host.
|
* *kubelet_make_iptables_util_chains* - If `true`, causes the kubelet ensures a set of `iptables` rules are present on host.
|
||||||
* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
|
|
||||||
|
* *kubelet_systemd_hardening* - If `true`, provides kubelet systemd service with security features for isolation.
|
||||||
|
|
||||||
|
**N.B.** To enable this feature, ensure you are using the **`cgroup v2`** on your system. Check it out with command: `sudo ls -l /sys/fs/cgroup/*.slice`. If directory does not exists, enable this with the following guide: [enable cgroup v2](https://rootlesscontaine.rs/getting-started/common/cgroup2/#enabling-cgroup-v2).
|
||||||
|
|
||||||
|
* *kubelet_secure_addresses* - By default *kubelet_systemd_hardening* set the **control plane** `ansible_host` IPs as the `kubelet_secure_addresses`. In case you have multiple interfaces in your control plane nodes and the `kube-apiserver` is not bound to the default interface, you can override them with this variable.
|
||||||
|
Example:
|
||||||
|
|
||||||
|
The **control plane** node may have 2 interfaces with the following IP addresses: `eth0:10.0.0.110`, `eth1:192.168.1.110`.
|
||||||
|
|
||||||
|
By default the `kubelet_secure_addresses` is set with the `10.0.0.110` the ansible control host uses `eth0` to connect to the machine. In case you want to use `eth1` as the outgoing interface on which `kube-apiserver` connects to the `kubelet`s, you should override the variable in this way: `kubelet_secure_addresses: "192.168.1.110"`.
|
||||||
|
|
||||||
|
* *node_labels* - Labels applied to nodes via `kubectl label node`.
|
||||||
For example, labels can be set in the inventory as variables or more widely in group_vars.
|
For example, labels can be set in the inventory as variables or more widely in group_vars.
|
||||||
*node_labels* can only be defined as a dict:
|
*node_labels* can only be defined as a dict:
|
||||||
|
|
||||||
|
|
|
@ -31,12 +31,13 @@ You need to source the vSphere credentials you use to deploy your machines that
|
||||||
| vsphere_csi_controller_replicas | TRUE | integer | | 1 | Number of pods Kubernetes should deploy for the CSI controller |
|
| vsphere_csi_controller_replicas | TRUE | integer | | 1 | Number of pods Kubernetes should deploy for the CSI controller |
|
||||||
| vsphere_csi_liveness_probe_image_tag | TRUE | string | | "v2.2.0" | CSI liveness probe image tag to use |
|
| vsphere_csi_liveness_probe_image_tag | TRUE | string | | "v2.2.0" | CSI liveness probe image tag to use |
|
||||||
| vsphere_csi_provisioner_image_tag | TRUE | string | | "v2.1.0" | CSI provisioner image tag to use |
|
| vsphere_csi_provisioner_image_tag | TRUE | string | | "v2.1.0" | CSI provisioner image tag to use |
|
||||||
| vsphere_csi_node_driver_registrar_image_tag | TRUE | string | | "v1.1.0" | CSI node driver registrat image tag to use |
|
| vsphere_csi_node_driver_registrar_image_tag | TRUE | string | | "v1.1.0" | CSI node driver registrar image tag to use |
|
||||||
| vsphere_csi_driver_image_tag | TRUE | string | | "v1.0.2" | CSI driver image tag to use |
|
| vsphere_csi_driver_image_tag | TRUE | string | | "v1.0.2" | CSI driver image tag to use |
|
||||||
| vsphere_csi_resizer_tag | TRUE | string | | "v1.1.0" | CSI resizer image tag to use
|
| vsphere_csi_resizer_tag | TRUE | string | | "v1.1.0" | CSI resizer image tag to use
|
||||||
| vsphere_csi_aggressive_node_drain | FALSE | boolean | | false | Enable aggressive node drain strategy |
|
| vsphere_csi_aggressive_node_drain | FALSE | boolean | | false | Enable aggressive node drain strategy |
|
||||||
| vsphere_csi_aggressive_node_unreachable_timeout | FALSE | int | 300 | | Timeout until the node will be drained when it is in an unreachable state |
|
| vsphere_csi_aggressive_node_unreachable_timeout | FALSE | int | 300 | | Timeout until the node will be drained when it is in an unreachable state |
|
||||||
| vsphere_csi_aggressive_node_not_ready_timeout | FALSE | int | 300 | | Timeout until the node will be drained when it is in a not-ready state |
|
| vsphere_csi_aggressive_node_not_ready_timeout | FALSE | int | 300 | | Timeout until the node will be drained when it is in a not-ready state |
|
||||||
|
| vsphere_csi_namespace | TRUE | string | | "kube-system" | vSphere CSI namespace to use; kube-system for backward compatibility, should be changed to vmware-system-csi in the long run |
|
||||||
|
|
||||||
## Usage example
|
## Usage example
|
||||||
|
|
||||||
|
|
|
@ -21,14 +21,14 @@ After this step you should have:
|
||||||
|
|
||||||
### Kubespray configuration
|
### Kubespray configuration
|
||||||
|
|
||||||
First in `inventory/sample/group_vars/all.yml` you must set the cloud provider to `external` and external_cloud_provider to `external_cloud_provider`.
|
First in `inventory/sample/group_vars/all/all.yml` you must set the cloud provider to `external` and external_cloud_provider to `external_cloud_provider`.
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
cloud_provider: "external"
|
cloud_provider: "external"
|
||||||
external_cloud_provider: "vsphere"
|
external_cloud_provider: "vsphere"
|
||||||
```
|
```
|
||||||
|
|
||||||
Then, `inventory/sample/group_vars/vsphere.yml`, you need to declare your vCenter credentials and enable the vSphere CSI following the description below.
|
Then, `inventory/sample/group_vars/all/vsphere.yml`, you need to declare your vCenter credentials and enable the vSphere CSI following the description below.
|
||||||
|
|
||||||
| Variable | Required | Type | Choices | Default | Comment |
|
| Variable | Required | Type | Choices | Default | Comment |
|
||||||
|----------------------------------------|----------|---------|----------------------------|---------------------------|---------------------------------------------------------------------------------------------------------------------|
|
|----------------------------------------|----------|---------|----------------------------|---------------------------|---------------------------------------------------------------------------------------------------------------------|
|
||||||
|
|
140
inventory/c12s-sample/group_vars/all/all.yml
Normal file
140
inventory/c12s-sample/group_vars/all/all.yml
Normal file
|
@ -0,0 +1,140 @@
|
||||||
|
---
|
||||||
|
## Directory where the binaries will be installed
|
||||||
|
bin_dir: /usr/local/bin
|
||||||
|
|
||||||
|
## The access_ip variable is used to define how other nodes should access
|
||||||
|
## the node. This is used in flannel to allow other flannel nodes to see
|
||||||
|
## this node for example. The access_ip is really useful in AWS and Google
|
||||||
|
## environments where the nodes are accessed remotely by the "public" ip,
|
||||||
|
## but don't know about that address themselves.
|
||||||
|
# access_ip: 1.1.1.1
|
||||||
|
|
||||||
|
|
||||||
|
## External LB example config
|
||||||
|
## apiserver_loadbalancer_domain_name: "elb.some.domain"
|
||||||
|
# loadbalancer_apiserver:
|
||||||
|
# address: 1.2.3.4
|
||||||
|
# port: 1234
|
||||||
|
|
||||||
|
## Internal loadbalancers for apiservers
|
||||||
|
# loadbalancer_apiserver_localhost: true
|
||||||
|
# valid options are "nginx" or "haproxy"
|
||||||
|
# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy"
|
||||||
|
|
||||||
|
## If the cilium is going to be used in strict mode, we can use the
|
||||||
|
## localhost connection and not use the external LB. If this parameter is
|
||||||
|
## not specified, the first node to connect to kubeapi will be used.
|
||||||
|
# use_localhost_as_kubeapi_loadbalancer: true
|
||||||
|
|
||||||
|
## Local loadbalancer should use this port
|
||||||
|
## And must be set port 6443
|
||||||
|
loadbalancer_apiserver_port: 6443
|
||||||
|
|
||||||
|
## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx.
|
||||||
|
loadbalancer_apiserver_healthcheck_port: 8081
|
||||||
|
|
||||||
|
### OTHER OPTIONAL VARIABLES
|
||||||
|
|
||||||
|
## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries.
|
||||||
|
## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage. However, it uses the nameservers to make sure the cluster is installed safely in the dns_early stage.
|
||||||
|
## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail.
|
||||||
|
# disable_host_nameservers: false
|
||||||
|
|
||||||
|
## Upstream dns servers
|
||||||
|
# upstream_dns_servers:
|
||||||
|
# - 8.8.8.8
|
||||||
|
# - 8.8.4.4
|
||||||
|
|
||||||
|
## There are some changes specific to the cloud providers
|
||||||
|
## for instance we need to encapsulate packets with some network plugins
|
||||||
|
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
|
||||||
|
## When openstack is used make sure to source in the openstack credentials
|
||||||
|
## like you would do when using openstack-client before starting the playbook.
|
||||||
|
# cloud_provider:
|
||||||
|
|
||||||
|
## When cloud_provider is set to 'external', you can set the cloud controller to deploy
|
||||||
|
## Supported cloud controllers are: 'openstack', 'vsphere' and 'hcloud'
|
||||||
|
## When openstack or vsphere are used make sure to source in the required fields
|
||||||
|
# external_cloud_provider:
|
||||||
|
|
||||||
|
## Set these proxy values in order to update package manager and docker daemon to use proxies
|
||||||
|
# http_proxy: ""
|
||||||
|
# https_proxy: ""
|
||||||
|
|
||||||
|
## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
|
||||||
|
# no_proxy: ""
|
||||||
|
|
||||||
|
## Some problems may occur when downloading files over https proxy due to ansible bug
|
||||||
|
## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
|
||||||
|
## SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
|
||||||
|
# download_validate_certs: False
|
||||||
|
|
||||||
|
## If you need to exclude all cluster nodes from the proxy and other resources, add the other resources here.
|
||||||
|
# additional_no_proxy: ""
|
||||||
|
|
||||||
|
## If you need to disable proxying of os package repositories but are still behind an http_proxy set
|
||||||
|
## skip_http_proxy_on_os_packages to true
|
||||||
|
## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu
|
||||||
|
## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish
|
||||||
|
# skip_http_proxy_on_os_packages: false
|
||||||
|
|
||||||
|
## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
|
||||||
|
## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the
|
||||||
|
## no_proxy variable, set below to true:
|
||||||
|
no_proxy_exclude_workers: false
|
||||||
|
|
||||||
|
## Certificate Management
|
||||||
|
## This setting determines whether certs are generated via scripts.
|
||||||
|
## Choose 'none' if you provide your own certificates.
|
||||||
|
## Option is "script", "none"
|
||||||
|
# cert_management: script
|
||||||
|
|
||||||
|
## Set to true to allow pre-checks to fail and continue deployment
|
||||||
|
# ignore_assert_errors: false
|
||||||
|
|
||||||
|
## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
|
||||||
|
# kube_read_only_port: 10255
|
||||||
|
|
||||||
|
## Set true to download and cache container
|
||||||
|
# download_container: true
|
||||||
|
|
||||||
|
## Deploy container engine
|
||||||
|
# Set false if you want to deploy container engine manually.
|
||||||
|
# deploy_container_engine: true
|
||||||
|
|
||||||
|
## Red Hat Enterprise Linux subscription registration
|
||||||
|
## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination
|
||||||
|
## Update RHEL subscription purpose usage, role and SLA if necessary
|
||||||
|
# rh_subscription_username: ""
|
||||||
|
# rh_subscription_password: ""
|
||||||
|
# rh_subscription_org_id: ""
|
||||||
|
# rh_subscription_activation_key: ""
|
||||||
|
# rh_subscription_usage: "Development"
|
||||||
|
# rh_subscription_role: "Red Hat Enterprise Server"
|
||||||
|
# rh_subscription_sla: "Self-Support"
|
||||||
|
|
||||||
|
## Check if access_ip responds to ping. Set false if your firewall blocks ICMP.
|
||||||
|
# ping_access_ip: true
|
||||||
|
|
||||||
|
# sysctl_file_path to add sysctl conf to
|
||||||
|
# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
|
||||||
|
|
||||||
|
## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
|
||||||
|
kube_webhook_token_auth: false
|
||||||
|
kube_webhook_token_auth_url_skip_tls_verify: false
|
||||||
|
# kube_webhook_token_auth_url: https://...
|
||||||
|
## base64-encoded string of the webhook's CA certificate
|
||||||
|
# kube_webhook_token_auth_ca_data: "LS0t..."
|
||||||
|
|
||||||
|
## NTP Settings
|
||||||
|
# Start the ntpd or chrony service and enable it at system boot.
|
||||||
|
ntp_enabled: false
|
||||||
|
ntp_manage_config: false
|
||||||
|
ntp_servers:
|
||||||
|
- "0.pool.ntp.org iburst"
|
||||||
|
- "1.pool.ntp.org iburst"
|
||||||
|
- "2.pool.ntp.org iburst"
|
||||||
|
- "3.pool.ntp.org iburst"
|
||||||
|
|
||||||
|
## Used to control no_log attribute
|
||||||
|
unsafe_show_logs: false
|
9
inventory/c12s-sample/group_vars/all/aws.yml
Normal file
9
inventory/c12s-sample/group_vars/all/aws.yml
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
## To use AWS EBS CSI Driver to provision volumes, uncomment the first value
|
||||||
|
## and configure the parameters below
|
||||||
|
# aws_ebs_csi_enabled: true
|
||||||
|
# aws_ebs_csi_enable_volume_scheduling: true
|
||||||
|
# aws_ebs_csi_enable_volume_snapshot: false
|
||||||
|
# aws_ebs_csi_enable_volume_resizing: false
|
||||||
|
# aws_ebs_csi_controller_replicas: 1
|
||||||
|
# aws_ebs_csi_plugin_image_tag: latest
|
||||||
|
# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment"
|
40
inventory/c12s-sample/group_vars/all/azure.yml
Normal file
40
inventory/c12s-sample/group_vars/all/azure.yml
Normal file
|
@ -0,0 +1,40 @@
|
||||||
|
## When azure is used, you need to also set the following variables.
|
||||||
|
## see docs/azure.md for details on how to get these values
|
||||||
|
|
||||||
|
# azure_cloud:
|
||||||
|
# azure_tenant_id:
|
||||||
|
# azure_subscription_id:
|
||||||
|
# azure_aad_client_id:
|
||||||
|
# azure_aad_client_secret:
|
||||||
|
# azure_resource_group:
|
||||||
|
# azure_location:
|
||||||
|
# azure_subnet_name:
|
||||||
|
# azure_security_group_name:
|
||||||
|
# azure_security_group_resource_group:
|
||||||
|
# azure_vnet_name:
|
||||||
|
# azure_vnet_resource_group:
|
||||||
|
# azure_route_table_name:
|
||||||
|
# azure_route_table_resource_group:
|
||||||
|
# supported values are 'standard' or 'vmss'
|
||||||
|
# azure_vmtype: standard
|
||||||
|
|
||||||
|
## Azure Disk CSI credentials and parameters
|
||||||
|
## see docs/azure-csi.md for details on how to get these values
|
||||||
|
|
||||||
|
# azure_csi_tenant_id:
|
||||||
|
# azure_csi_subscription_id:
|
||||||
|
# azure_csi_aad_client_id:
|
||||||
|
# azure_csi_aad_client_secret:
|
||||||
|
# azure_csi_location:
|
||||||
|
# azure_csi_resource_group:
|
||||||
|
# azure_csi_vnet_name:
|
||||||
|
# azure_csi_vnet_resource_group:
|
||||||
|
# azure_csi_subnet_name:
|
||||||
|
# azure_csi_security_group_name:
|
||||||
|
# azure_csi_use_instance_metadata:
|
||||||
|
# azure_csi_tags: "Owner=owner,Team=team,Environment=environment"
|
||||||
|
|
||||||
|
## To enable Azure Disk CSI, uncomment below
|
||||||
|
# azure_csi_enabled: true
|
||||||
|
# azure_csi_controller_replicas: 1
|
||||||
|
# azure_csi_plugin_image_tag: latest
|
50
inventory/c12s-sample/group_vars/all/containerd.yml
Normal file
50
inventory/c12s-sample/group_vars/all/containerd.yml
Normal file
|
@ -0,0 +1,50 @@
|
||||||
|
---
|
||||||
|
# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options
|
||||||
|
|
||||||
|
# containerd_storage_dir: "/var/lib/containerd"
|
||||||
|
# containerd_state_dir: "/run/containerd"
|
||||||
|
# containerd_oom_score: 0
|
||||||
|
|
||||||
|
# containerd_default_runtime: "runc"
|
||||||
|
# containerd_snapshotter: "native"
|
||||||
|
|
||||||
|
# containerd_runc_runtime:
|
||||||
|
# name: runc
|
||||||
|
# type: "io.containerd.runc.v2"
|
||||||
|
# engine: ""
|
||||||
|
# root: ""
|
||||||
|
|
||||||
|
# containerd_additional_runtimes:
|
||||||
|
# Example for Kata Containers as additional runtime:
|
||||||
|
# - name: kata
|
||||||
|
# type: "io.containerd.kata.v2"
|
||||||
|
# engine: ""
|
||||||
|
# root: ""
|
||||||
|
|
||||||
|
# containerd_grpc_max_recv_message_size: 16777216
|
||||||
|
# containerd_grpc_max_send_message_size: 16777216
|
||||||
|
|
||||||
|
# containerd_debug_level: "info"
|
||||||
|
|
||||||
|
# containerd_metrics_address: ""
|
||||||
|
|
||||||
|
# containerd_metrics_grpc_histogram: false
|
||||||
|
|
||||||
|
## An obvious use case is allowing insecure-registry access to self hosted registries.
|
||||||
|
## Can be ipaddress and domain_name.
|
||||||
|
## example define mirror.registry.io or 172.19.16.11:5000
|
||||||
|
## set "name": "url". insecure url must be started http://
|
||||||
|
## Port number is also needed if the default HTTPS port is not used.
|
||||||
|
# containerd_insecure_registries:
|
||||||
|
# "localhost": "http://127.0.0.1"
|
||||||
|
# "172.19.16.11:5000": "http://172.19.16.11:5000"
|
||||||
|
|
||||||
|
# containerd_registries:
|
||||||
|
# "docker.io": "https://registry-1.docker.io"
|
||||||
|
|
||||||
|
# containerd_max_container_log_line_size: -1
|
||||||
|
|
||||||
|
# containerd_registry_auth:
|
||||||
|
# - registry: 10.0.0.2:5000
|
||||||
|
# username: user
|
||||||
|
# password: pass
|
2
inventory/c12s-sample/group_vars/all/coreos.yml
Normal file
2
inventory/c12s-sample/group_vars/all/coreos.yml
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
## Does coreos need auto upgrade, default is true
|
||||||
|
# coreos_auto_upgrade: true
|
6
inventory/c12s-sample/group_vars/all/cri-o.yml
Normal file
6
inventory/c12s-sample/group_vars/all/cri-o.yml
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
# crio_insecure_registries:
|
||||||
|
# - 10.0.0.2:5000
|
||||||
|
# crio_registry_auth:
|
||||||
|
# - registry: 10.0.0.2:5000
|
||||||
|
# username: user
|
||||||
|
# password: pass
|
59
inventory/c12s-sample/group_vars/all/docker.yml
Normal file
59
inventory/c12s-sample/group_vars/all/docker.yml
Normal file
|
@ -0,0 +1,59 @@
|
||||||
|
---
|
||||||
|
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
|
||||||
|
## Please note that overlay2 is only supported on newer kernels
|
||||||
|
# docker_storage_options: -s overlay2
|
||||||
|
|
||||||
|
## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7.
|
||||||
|
docker_container_storage_setup: false
|
||||||
|
|
||||||
|
## You must define a disk path for docker_container_storage_setup_devs.
|
||||||
|
## Otherwise docker-storage-setup will be executed incorrectly.
|
||||||
|
# docker_container_storage_setup_devs: /dev/vdb
|
||||||
|
|
||||||
|
## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver)
|
||||||
|
## Valid options are systemd or cgroupfs, default is systemd
|
||||||
|
# docker_cgroup_driver: systemd
|
||||||
|
|
||||||
|
## Only set this if you have more than 3 nameservers:
|
||||||
|
## If true Kubespray will only use the first 3, otherwise it will fail
|
||||||
|
docker_dns_servers_strict: false
|
||||||
|
|
||||||
|
# Path used to store Docker data
|
||||||
|
docker_daemon_graph: "/var/lib/docker"
|
||||||
|
|
||||||
|
## Used to set docker daemon iptables options to true
|
||||||
|
docker_iptables_enabled: "false"
|
||||||
|
|
||||||
|
# Docker log options
|
||||||
|
# Rotate container stderr/stdout logs at 50m and keep last 5
|
||||||
|
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
|
||||||
|
|
||||||
|
# define docker bin_dir
|
||||||
|
docker_bin_dir: "/usr/bin"
|
||||||
|
|
||||||
|
# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1'
|
||||||
|
# kubespray deletes the docker package on each run, so caching the package makes sense
|
||||||
|
docker_rpm_keepcache: 1
|
||||||
|
|
||||||
|
## An obvious use case is allowing insecure-registry access to self hosted registries.
|
||||||
|
## Can be ipaddress and domain_name.
|
||||||
|
## example define 172.19.16.11 or mirror.registry.io
|
||||||
|
# docker_insecure_registries:
|
||||||
|
# - mirror.registry.io
|
||||||
|
# - 172.19.16.11
|
||||||
|
|
||||||
|
## Add other registry,example China registry mirror.
|
||||||
|
# docker_registry_mirrors:
|
||||||
|
# - https://registry.docker-cn.com
|
||||||
|
# - https://mirror.aliyuncs.com
|
||||||
|
|
||||||
|
## If non-empty will override default system MountFlags value.
|
||||||
|
## This option takes a mount propagation flag: shared, slave
|
||||||
|
## or private, which control whether mounts in the file system
|
||||||
|
## namespace set up for docker will receive or propagate mounts
|
||||||
|
## and unmounts. Leave empty for system default
|
||||||
|
# docker_mount_flags:
|
||||||
|
|
||||||
|
## A string of extra options to pass to the docker daemon.
|
||||||
|
## This string should be exactly as you wish it to appear.
|
||||||
|
# docker_options: ""
|
16
inventory/c12s-sample/group_vars/all/etcd.yml
Normal file
16
inventory/c12s-sample/group_vars/all/etcd.yml
Normal file
|
@ -0,0 +1,16 @@
|
||||||
|
---
|
||||||
|
## Directory where etcd data stored
|
||||||
|
etcd_data_dir: /var/lib/etcd
|
||||||
|
|
||||||
|
## Container runtime
|
||||||
|
## docker for docker, crio for cri-o and containerd for containerd.
|
||||||
|
## Additionally you can set this to kubeadm if you want to install etcd using kubeadm
|
||||||
|
## Kubeadm etcd deployment is experimental and only available for new deployments
|
||||||
|
## If this is not set, container manager will be inherited from the Kubespray defaults
|
||||||
|
## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want.
|
||||||
|
## Also this makes possible to use different container manager for etcd nodes.
|
||||||
|
# container_manager: containerd
|
||||||
|
|
||||||
|
## Settings for etcd deployment type
|
||||||
|
# Set this to docker if you are using container_manager: docker
|
||||||
|
etcd_deployment_type: host
|
10
inventory/c12s-sample/group_vars/all/gcp.yml
Normal file
10
inventory/c12s-sample/group_vars/all/gcp.yml
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
## GCP compute Persistent Disk CSI Driver credentials and parameters
|
||||||
|
## See docs/gcp-pd-csi.md for information about the implementation
|
||||||
|
|
||||||
|
## Specify the path to the file containing the service account credentials
|
||||||
|
# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json"
|
||||||
|
|
||||||
|
## To enable GCP Persistent Disk CSI driver, uncomment below
|
||||||
|
# gcp_pd_csi_enabled: true
|
||||||
|
# gcp_pd_csi_controller_replicas: 1
|
||||||
|
# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0"
|
14
inventory/c12s-sample/group_vars/all/hcloud.yml
Normal file
14
inventory/c12s-sample/group_vars/all/hcloud.yml
Normal file
|
@ -0,0 +1,14 @@
|
||||||
|
## Values for the external Hcloud Cloud Controller
|
||||||
|
# external_hcloud_cloud:
|
||||||
|
# hcloud_api_token: ""
|
||||||
|
# token_secret_name: hcloud
|
||||||
|
# with_networks: false # Use the hcloud controller-manager with networks support https://github.com/hetznercloud/hcloud-cloud-controller-manager#networks-support
|
||||||
|
# service_account_name: cloud-controller-manager
|
||||||
|
#
|
||||||
|
# controller_image_tag: "latest"
|
||||||
|
# ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset
|
||||||
|
# ## Format:
|
||||||
|
# ## external_hcloud_cloud.controller_extra_args:
|
||||||
|
# ## arg1: "value1"
|
||||||
|
# ## arg2: "value2"
|
||||||
|
# controller_extra_args: {}
|
28
inventory/c12s-sample/group_vars/all/oci.yml
Normal file
28
inventory/c12s-sample/group_vars/all/oci.yml
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
## When Oracle Cloud Infrastructure is used, set these variables
|
||||||
|
# oci_private_key:
|
||||||
|
# oci_region_id:
|
||||||
|
# oci_tenancy_id:
|
||||||
|
# oci_user_id:
|
||||||
|
# oci_user_fingerprint:
|
||||||
|
# oci_compartment_id:
|
||||||
|
# oci_vnc_id:
|
||||||
|
# oci_subnet1_id:
|
||||||
|
# oci_subnet2_id:
|
||||||
|
## Override these default/optional behaviors if you wish
|
||||||
|
# oci_security_list_management: All
|
||||||
|
## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples.
|
||||||
|
# oci_security_lists:
|
||||||
|
# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
|
||||||
|
# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
|
||||||
|
## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
|
||||||
|
# oci_use_instance_principals: false
|
||||||
|
# oci_cloud_controller_version: 0.6.0
|
||||||
|
## If you would like to control OCI query rate limits for the controller
|
||||||
|
# oci_rate_limit:
|
||||||
|
# rate_limit_qps_read:
|
||||||
|
# rate_limit_qps_write:
|
||||||
|
# rate_limit_bucket_read:
|
||||||
|
# rate_limit_bucket_write:
|
||||||
|
## Other optional variables
|
||||||
|
# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci)
|
||||||
|
# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above)
|
103
inventory/c12s-sample/group_vars/all/offline.yml
Normal file
103
inventory/c12s-sample/group_vars/all/offline.yml
Normal file
|
@ -0,0 +1,103 @@
|
||||||
|
---
|
||||||
|
## Global Offline settings
|
||||||
|
### Private Container Image Registry
|
||||||
|
# registry_host: "myprivateregistry.com"
|
||||||
|
# files_repo: "http://myprivatehttpd"
|
||||||
|
### If using CentOS, RedHat, AlmaLinux or Fedora
|
||||||
|
# yum_repo: "http://myinternalyumrepo"
|
||||||
|
### If using Debian
|
||||||
|
# debian_repo: "http://myinternaldebianrepo"
|
||||||
|
### If using Ubuntu
|
||||||
|
# ubuntu_repo: "http://myinternalubunturepo"
|
||||||
|
|
||||||
|
## Container Registry overrides
|
||||||
|
# kube_image_repo: "{{ registry_host }}"
|
||||||
|
# gcr_image_repo: "{{ registry_host }}"
|
||||||
|
# github_image_repo: "{{ registry_host }}"
|
||||||
|
# docker_image_repo: "{{ registry_host }}"
|
||||||
|
# quay_image_repo: "{{ registry_host }}"
|
||||||
|
|
||||||
|
## Kubernetes components
|
||||||
|
# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm"
|
||||||
|
# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
|
||||||
|
# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"
|
||||||
|
|
||||||
|
## CNI Plugins
|
||||||
|
# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
|
||||||
|
|
||||||
|
## cri-tools
|
||||||
|
# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
|
||||||
|
|
||||||
|
## [Optional] etcd: only if you **DON'T** use etcd_deployment=host
|
||||||
|
# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
|
||||||
|
|
||||||
|
# [Optional] Calico: If using Calico network plugin
|
||||||
|
# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
|
||||||
|
# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
|
||||||
|
# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore
|
||||||
|
# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz"
|
||||||
|
|
||||||
|
# [Optional] Cilium: If using Cilium network plugin
|
||||||
|
# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz"
|
||||||
|
|
||||||
|
# [Optional] Flannel: If using Flannel network plugin
|
||||||
|
# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}"
|
||||||
|
|
||||||
|
# [Optional] helm: only if you set helm_enabled: true
|
||||||
|
# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"
|
||||||
|
|
||||||
|
# [Optional] crun: only if you set crun_enabled: true
|
||||||
|
# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"
|
||||||
|
|
||||||
|
# [Optional] kata: only if you set kata_containers_enabled: true
|
||||||
|
# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz"
|
||||||
|
|
||||||
|
# [Optional] cri-dockerd: only if you set container_manager: docker
|
||||||
|
# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz"
|
||||||
|
|
||||||
|
# [Optional] cri-o: only if you set container_manager: crio
|
||||||
|
# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
|
||||||
|
# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"
|
||||||
|
|
||||||
|
# [Optional] runc,containerd: only if you set container_runtime: containerd
|
||||||
|
# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}"
|
||||||
|
# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
|
||||||
|
# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
|
||||||
|
|
||||||
|
# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true
|
||||||
|
# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc"
|
||||||
|
# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1"
|
||||||
|
|
||||||
|
## CentOS/Redhat/AlmaLinux
|
||||||
|
### For EL7, base and extras repo must be available, for EL8, baseos and appstream
|
||||||
|
### By default we enable those repo automatically
|
||||||
|
# rhel_enable_repos: false
|
||||||
|
### Docker / Containerd
|
||||||
|
# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
|
||||||
|
# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
|
||||||
|
|
||||||
|
## Fedora
|
||||||
|
### Docker
|
||||||
|
# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}"
|
||||||
|
# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
|
||||||
|
### Containerd
|
||||||
|
# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd"
|
||||||
|
# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
|
||||||
|
|
||||||
|
## Debian
|
||||||
|
### Docker
|
||||||
|
# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
|
||||||
|
# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
|
||||||
|
### Containerd
|
||||||
|
# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd"
|
||||||
|
# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg"
|
||||||
|
# containerd_debian_repo_repokey: 'YOURREPOKEY'
|
||||||
|
|
||||||
|
## Ubuntu
|
||||||
|
### Docker
|
||||||
|
# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce"
|
||||||
|
# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg"
|
||||||
|
### Containerd
|
||||||
|
# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd"
|
||||||
|
# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
|
||||||
|
# containerd_ubuntu_repo_repokey: 'YOURREPOKEY'
|
49
inventory/c12s-sample/group_vars/all/openstack.yml
Normal file
49
inventory/c12s-sample/group_vars/all/openstack.yml
Normal file
|
@ -0,0 +1,49 @@
|
||||||
|
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
|
||||||
|
# openstack_blockstorage_version: "v1/v2/auto (default)"
|
||||||
|
# openstack_blockstorage_ignore_volume_az: yes
|
||||||
|
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
|
||||||
|
# openstack_lbaas_enabled: True
|
||||||
|
# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
|
||||||
|
## To enable automatic floating ip provisioning, specify a subnet.
|
||||||
|
# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
|
||||||
|
## Override default LBaaS behavior
|
||||||
|
# openstack_lbaas_use_octavia: False
|
||||||
|
# openstack_lbaas_method: "ROUND_ROBIN"
|
||||||
|
# openstack_lbaas_provider: "haproxy"
|
||||||
|
# openstack_lbaas_create_monitor: "yes"
|
||||||
|
# openstack_lbaas_monitor_delay: "1m"
|
||||||
|
# openstack_lbaas_monitor_timeout: "30s"
|
||||||
|
# openstack_lbaas_monitor_max_retries: "3"
|
||||||
|
|
||||||
|
## Values for the external OpenStack Cloud Controller
|
||||||
|
# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP"
|
||||||
|
# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP"
|
||||||
|
# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from"
|
||||||
|
# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from"
|
||||||
|
# external_openstack_lbaas_method: "ROUND_ROBIN"
|
||||||
|
# external_openstack_lbaas_provider: "octavia"
|
||||||
|
# external_openstack_lbaas_create_monitor: false
|
||||||
|
# external_openstack_lbaas_monitor_delay: "1m"
|
||||||
|
# external_openstack_lbaas_monitor_timeout: "30s"
|
||||||
|
# external_openstack_lbaas_monitor_max_retries: "3"
|
||||||
|
# external_openstack_lbaas_manage_security_groups: false
|
||||||
|
# external_openstack_lbaas_internal_lb: false
|
||||||
|
# external_openstack_network_ipv6_disabled: false
|
||||||
|
# external_openstack_network_internal_networks: []
|
||||||
|
# external_openstack_network_public_networks: []
|
||||||
|
# external_openstack_metadata_search_order: "configDrive,metadataService"
|
||||||
|
|
||||||
|
## Application credentials to authenticate against Keystone API
|
||||||
|
## Those settings will take precedence over username and password that might be set in your environment
|
||||||
|
## All of them are required
|
||||||
|
# external_openstack_application_credential_name:
|
||||||
|
# external_openstack_application_credential_id:
|
||||||
|
# external_openstack_application_credential_secret:
|
||||||
|
|
||||||
|
## The tag of the external OpenStack Cloud Controller image
|
||||||
|
# external_openstack_cloud_controller_image_tag: "latest"
|
||||||
|
|
||||||
|
## To use Cinder CSI plugin to provision volumes set this value to true
|
||||||
|
## Make sure to source in the openstack credentials
|
||||||
|
# cinder_csi_enabled: true
|
||||||
|
# cinder_csi_controller_replicas: 1
|
24
inventory/c12s-sample/group_vars/all/upcloud.yml
Normal file
24
inventory/c12s-sample/group_vars/all/upcloud.yml
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
## Repo for UpCloud's csi-driver: https://github.com/UpCloudLtd/upcloud-csi
|
||||||
|
## To use UpCloud's CSI plugin to provision volumes set this value to true
|
||||||
|
## Remember to set UPCLOUD_USERNAME and UPCLOUD_PASSWORD
|
||||||
|
# upcloud_csi_enabled: true
|
||||||
|
# upcloud_csi_controller_replicas: 1
|
||||||
|
## Override used image tags
|
||||||
|
# upcloud_csi_provisioner_image_tag: "v3.1.0"
|
||||||
|
# upcloud_csi_attacher_image_tag: "v3.4.0"
|
||||||
|
# upcloud_csi_resizer_image_tag: "v1.4.0"
|
||||||
|
# upcloud_csi_plugin_image_tag: "v0.3.3"
|
||||||
|
# upcloud_csi_node_image_tag: "v2.5.0"
|
||||||
|
# upcloud_tolerations: []
|
||||||
|
## Storage class options
|
||||||
|
# storage_classes:
|
||||||
|
# - name: standard
|
||||||
|
# is_default: true
|
||||||
|
# expand_persistent_volumes: true
|
||||||
|
# parameters:
|
||||||
|
# tier: maxiops
|
||||||
|
# - name: hdd
|
||||||
|
# is_default: false
|
||||||
|
# expand_persistent_volumes: true
|
||||||
|
# parameters:
|
||||||
|
# tier: hdd
|
32
inventory/c12s-sample/group_vars/all/vsphere.yml
Normal file
32
inventory/c12s-sample/group_vars/all/vsphere.yml
Normal file
|
@ -0,0 +1,32 @@
|
||||||
|
## Values for the external vSphere Cloud Provider
|
||||||
|
# external_vsphere_vcenter_ip: "myvcenter.domain.com"
|
||||||
|
# external_vsphere_vcenter_port: "443"
|
||||||
|
# external_vsphere_insecure: "true"
|
||||||
|
# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable
|
||||||
|
# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable
|
||||||
|
# external_vsphere_datacenter: "DATACENTER_name"
|
||||||
|
# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"
|
||||||
|
|
||||||
|
## vSphere version where the VMs are located
|
||||||
|
# external_vsphere_version: "6.7u3"
|
||||||
|
|
||||||
|
## Tags for the external vSphere Cloud Provider images
|
||||||
|
## gcr.io/cloud-provider-vsphere/cpi/release/manager
|
||||||
|
# external_vsphere_cloud_controller_image_tag: "latest"
|
||||||
|
## gcr.io/cloud-provider-vsphere/csi/release/syncer
|
||||||
|
# vsphere_syncer_image_tag: "v2.5.1"
|
||||||
|
## registry.k8s.io/sig-storage/csi-attacher
|
||||||
|
# vsphere_csi_attacher_image_tag: "v3.4.0"
|
||||||
|
## gcr.io/cloud-provider-vsphere/csi/release/driver
|
||||||
|
# vsphere_csi_controller: "v2.5.1"
|
||||||
|
## registry.k8s.io/sig-storage/livenessprobe
|
||||||
|
# vsphere_csi_liveness_probe_image_tag: "v2.6.0"
|
||||||
|
## registry.k8s.io/sig-storage/csi-provisioner
|
||||||
|
# vsphere_csi_provisioner_image_tag: "v3.1.0"
|
||||||
|
## registry.k8s.io/sig-storage/csi-resizer
|
||||||
|
## makes sense only for vSphere version >=7.0
|
||||||
|
# vsphere_csi_resizer_tag: "v1.3.0"
|
||||||
|
|
||||||
|
## To use vSphere CSI plugin to provision volumes set this value to true
|
||||||
|
# vsphere_csi_enabled: true
|
||||||
|
# vsphere_csi_controller_replicas: 1
|
26
inventory/c12s-sample/group_vars/etcd.yml
Normal file
26
inventory/c12s-sample/group_vars/etcd.yml
Normal file
|
@ -0,0 +1,26 @@
|
||||||
|
---
|
||||||
|
## Etcd auto compaction retention for mvcc key value store in hour
|
||||||
|
# etcd_compaction_retention: 0
|
||||||
|
|
||||||
|
## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
|
||||||
|
# etcd_metrics: basic
|
||||||
|
|
||||||
|
## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing.
|
||||||
|
## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
|
||||||
|
## This value is only relevant when deploying etcd with `etcd_deployment_type: docker`
|
||||||
|
# etcd_memory_limit: "512M"
|
||||||
|
|
||||||
|
## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
|
||||||
|
## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
|
||||||
|
## etcd documentation for more information.
|
||||||
|
# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it.
|
||||||
|
# etcd_quota_backend_bytes: "2147483648"
|
||||||
|
|
||||||
|
# Maximum client request size in bytes the server will accept.
|
||||||
|
# etcd is designed to handle small key value pairs typical for metadata.
|
||||||
|
# Larger requests will work, but may increase the latency of other requests
|
||||||
|
# etcd_max_request_bytes: "1572864"
|
||||||
|
|
||||||
|
### ETCD: disable peer client cert authentication.
|
||||||
|
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
|
||||||
|
# etcd_peer_client_auth: true
|
228
inventory/c12s-sample/group_vars/k8s_cluster/addons.yml
Normal file
228
inventory/c12s-sample/group_vars/k8s_cluster/addons.yml
Normal file
|
@ -0,0 +1,228 @@
|
||||||
|
---
|
||||||
|
# Kubernetes dashboard
|
||||||
|
# RBAC required. see docs/getting-started.md for access details.
|
||||||
|
dashboard_enabled: true
|
||||||
|
|
||||||
|
# Helm deployment
|
||||||
|
helm_enabled: false
|
||||||
|
|
||||||
|
# Registry deployment
|
||||||
|
registry_enabled: false
|
||||||
|
# registry_namespace: kube-system
|
||||||
|
# registry_storage_class: ""
|
||||||
|
# registry_disk_size: "10Gi"
|
||||||
|
|
||||||
|
# Metrics Server deployment
|
||||||
|
metrics_server_enabled: false
|
||||||
|
# metrics_server_container_port: 4443
|
||||||
|
# metrics_server_kubelet_insecure_tls: true
|
||||||
|
# metrics_server_metric_resolution: 15s
|
||||||
|
# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname"
|
||||||
|
# metrics_server_host_network: false
|
||||||
|
# metrics_server_replicas: 1
|
||||||
|
|
||||||
|
# Rancher Local Path Provisioner
|
||||||
|
local_path_provisioner_enabled: false
|
||||||
|
# local_path_provisioner_namespace: "local-path-storage"
|
||||||
|
# local_path_provisioner_storage_class: "local-path"
|
||||||
|
# local_path_provisioner_reclaim_policy: Delete
|
||||||
|
# local_path_provisioner_claim_root: /opt/local-path-provisioner/
|
||||||
|
# local_path_provisioner_debug: false
|
||||||
|
# local_path_provisioner_image_repo: "rancher/local-path-provisioner"
|
||||||
|
# local_path_provisioner_image_tag: "v0.0.22"
|
||||||
|
# local_path_provisioner_helper_image_repo: "busybox"
|
||||||
|
# local_path_provisioner_helper_image_tag: "latest"
|
||||||
|
|
||||||
|
# Local volume provisioner deployment
|
||||||
|
local_volume_provisioner_enabled: false
|
||||||
|
# local_volume_provisioner_namespace: kube-system
|
||||||
|
# local_volume_provisioner_nodelabels:
|
||||||
|
# - kubernetes.io/hostname
|
||||||
|
# - topology.kubernetes.io/region
|
||||||
|
# - topology.kubernetes.io/zone
|
||||||
|
# local_volume_provisioner_storage_classes:
|
||||||
|
# local-storage:
|
||||||
|
# host_dir: /mnt/disks
|
||||||
|
# mount_dir: /mnt/disks
|
||||||
|
# volume_mode: Filesystem
|
||||||
|
# fs_type: ext4
|
||||||
|
# fast-disks:
|
||||||
|
# host_dir: /mnt/fast-disks
|
||||||
|
# mount_dir: /mnt/fast-disks
|
||||||
|
# block_cleaner_command:
|
||||||
|
# - "/scripts/shred.sh"
|
||||||
|
# - "2"
|
||||||
|
# volume_mode: Filesystem
|
||||||
|
# fs_type: ext4
|
||||||
|
# local_volume_provisioner_tolerations:
|
||||||
|
# - effect: NoSchedule
|
||||||
|
# operator: Exists
|
||||||
|
|
||||||
|
# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots
|
||||||
|
# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller
|
||||||
|
# Longhorn is an external CSI that would also require setting this to true but it is not included in kubespray
|
||||||
|
# csi_snapshot_controller_enabled: false
|
||||||
|
# csi snapshot namespace
|
||||||
|
# snapshot_controller_namespace: kube-system
|
||||||
|
|
||||||
|
# CephFS provisioner deployment
|
||||||
|
cephfs_provisioner_enabled: false
|
||||||
|
# cephfs_provisioner_namespace: "cephfs-provisioner"
|
||||||
|
# cephfs_provisioner_cluster: ceph
|
||||||
|
# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
|
||||||
|
# cephfs_provisioner_admin_id: admin
|
||||||
|
# cephfs_provisioner_secret: secret
|
||||||
|
# cephfs_provisioner_storage_class: cephfs
|
||||||
|
# cephfs_provisioner_reclaim_policy: Delete
|
||||||
|
# cephfs_provisioner_claim_root: /volumes
|
||||||
|
# cephfs_provisioner_deterministic_names: true
|
||||||
|
|
||||||
|
# RBD provisioner deployment
|
||||||
|
rbd_provisioner_enabled: false
|
||||||
|
# rbd_provisioner_namespace: rbd-provisioner
|
||||||
|
# rbd_provisioner_replicas: 2
|
||||||
|
# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
|
||||||
|
# rbd_provisioner_pool: kube
|
||||||
|
# rbd_provisioner_admin_id: admin
|
||||||
|
# rbd_provisioner_secret_name: ceph-secret-admin
|
||||||
|
# rbd_provisioner_secret: ceph-key-admin
|
||||||
|
# rbd_provisioner_user_id: kube
|
||||||
|
# rbd_provisioner_user_secret_name: ceph-secret-user
|
||||||
|
# rbd_provisioner_user_secret: ceph-key-user
|
||||||
|
# rbd_provisioner_user_secret_namespace: rbd-provisioner
|
||||||
|
# rbd_provisioner_fs_type: ext4
|
||||||
|
# rbd_provisioner_image_format: "2"
|
||||||
|
# rbd_provisioner_image_features: layering
|
||||||
|
# rbd_provisioner_storage_class: rbd
|
||||||
|
# rbd_provisioner_reclaim_policy: Delete
|
||||||
|
|
||||||
|
# Nginx ingress controller deployment
|
||||||
|
ingress_nginx_enabled: true
|
||||||
|
# ingress_nginx_host_network: false
|
||||||
|
ingress_publish_status_address: ""
|
||||||
|
# ingress_nginx_nodeselector:
|
||||||
|
# kubernetes.io/os: "linux"
|
||||||
|
# ingress_nginx_tolerations:
|
||||||
|
# - key: "node-role.kubernetes.io/master"
|
||||||
|
# operator: "Equal"
|
||||||
|
# value: ""
|
||||||
|
# effect: "NoSchedule"
|
||||||
|
# - key: "node-role.kubernetes.io/control-plane"
|
||||||
|
# operator: "Equal"
|
||||||
|
# value: ""
|
||||||
|
# effect: "NoSchedule"
|
||||||
|
# ingress_nginx_namespace: "ingress-nginx"
|
||||||
|
# ingress_nginx_insecure_port: 80
|
||||||
|
# ingress_nginx_secure_port: 443
|
||||||
|
# ingress_nginx_configmap:
|
||||||
|
# map-hash-bucket-size: "128"
|
||||||
|
# ssl-protocols: "TLSv1.2 TLSv1.3"
|
||||||
|
# ingress_nginx_configmap_tcp_services:
|
||||||
|
# 9000: "default/example-go:8080"
|
||||||
|
# ingress_nginx_configmap_udp_services:
|
||||||
|
# 53: "kube-system/coredns:53"
|
||||||
|
# ingress_nginx_extra_args:
|
||||||
|
# - --default-ssl-certificate=default/foo-tls
|
||||||
|
# ingress_nginx_termination_grace_period_seconds: 300
|
||||||
|
# ingress_nginx_class: nginx
|
||||||
|
|
||||||
|
# ALB ingress controller deployment
|
||||||
|
ingress_alb_enabled: false
|
||||||
|
# alb_ingress_aws_region: "us-east-1"
|
||||||
|
# alb_ingress_restrict_scheme: "false"
|
||||||
|
# Enables logging on all outbound requests sent to the AWS API.
|
||||||
|
# If logging is desired, set to true.
|
||||||
|
# alb_ingress_aws_debug: "false"
|
||||||
|
|
||||||
|
# Cert manager deployment
|
||||||
|
cert_manager_enabled: true
|
||||||
|
# cert_manager_namespace: "cert-manager"
|
||||||
|
# cert_manager_tolerations:
|
||||||
|
# - key: node-role.kubernetes.io/master
|
||||||
|
# effect: NoSchedule
|
||||||
|
# - key: node-role.kubernetes.io/control-plane
|
||||||
|
# effect: NoSchedule
|
||||||
|
# cert_manager_affinity:
|
||||||
|
# nodeAffinity:
|
||||||
|
# preferredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
# - weight: 100
|
||||||
|
# preference:
|
||||||
|
# matchExpressions:
|
||||||
|
# - key: node-role.kubernetes.io/control-plane
|
||||||
|
# operator: In
|
||||||
|
# values:
|
||||||
|
# - ""
|
||||||
|
# cert_manager_nodeselector:
|
||||||
|
# kubernetes.io/os: "linux"
|
||||||
|
|
||||||
|
# cert_manager_trusted_internal_ca: |
|
||||||
|
# -----BEGIN CERTIFICATE-----
|
||||||
|
# [REPLACE with your CA certificate]
|
||||||
|
# -----END CERTIFICATE-----
|
||||||
|
# cert_manager_leader_election_namespace: kube-system
|
||||||
|
|
||||||
|
# MetalLB deployment
|
||||||
|
metallb_enabled: true
|
||||||
|
metallb_speaker_enabled: "{{ metallb_enabled }}"
|
||||||
|
metallb_ip_range:
|
||||||
|
- "192.168.30.201-192.168.30.239"
|
||||||
|
metallb_pool_name: "private-lan"
|
||||||
|
# metallb_auto_assign: true
|
||||||
|
# metallb_avoid_buggy_ips: false
|
||||||
|
# metallb_speaker_nodeselector:
|
||||||
|
# kubernetes.io/os: "linux"
|
||||||
|
# metallb_controller_nodeselector:
|
||||||
|
# kubernetes.io/os: "linux"
|
||||||
|
# metallb_speaker_tolerations:
|
||||||
|
# - key: "node-role.kubernetes.io/master"
|
||||||
|
# operator: "Equal"
|
||||||
|
# value: ""
|
||||||
|
# effect: "NoSchedule"
|
||||||
|
# - key: "node-role.kubernetes.io/control-plane"
|
||||||
|
# operator: "Equal"
|
||||||
|
# value: ""
|
||||||
|
# effect: "NoSchedule"
|
||||||
|
# metallb_controller_tolerations:
|
||||||
|
# - key: "node-role.kubernetes.io/master"
|
||||||
|
# operator: "Equal"
|
||||||
|
# value: ""
|
||||||
|
# effect: "NoSchedule"
|
||||||
|
# - key: "node-role.kubernetes.io/control-plane"
|
||||||
|
# operator: "Equal"
|
||||||
|
# value: ""
|
||||||
|
# effect: "NoSchedule"
|
||||||
|
# metallb_version: v0.12.1
|
||||||
|
metallb_protocol: "layer2"
|
||||||
|
# metallb_port: "7472"
|
||||||
|
# metallb_memberlist_port: "7946"
|
||||||
|
metallb_additional_address_pools:
|
||||||
|
public-lan:
|
||||||
|
ip_range:
|
||||||
|
- "192.168.1.30-192.168.1.34"
|
||||||
|
protocol: "layer2"
|
||||||
|
auto_assign: false
|
||||||
|
# avoid_buggy_ips: false
|
||||||
|
# metallb_protocol: "bgp"
|
||||||
|
# metallb_peers:
|
||||||
|
# - peer_address: 192.0.2.1
|
||||||
|
# peer_asn: 64512
|
||||||
|
# my_asn: 4200000000
|
||||||
|
# - peer_address: 192.0.2.2
|
||||||
|
# peer_asn: 64513
|
||||||
|
# my_asn: 4200000000
|
||||||
|
|
||||||
|
argocd_enabled: true
|
||||||
|
argocd_version: v2.5.5
|
||||||
|
# argocd_namespace: argocd
|
||||||
|
# Default password:
|
||||||
|
# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli
|
||||||
|
# ---
|
||||||
|
# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command:
|
||||||
|
# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2
|
||||||
|
# ---
|
||||||
|
# Use the following var to set admin password
|
||||||
|
# argocd_admin_password: "password"
|
||||||
|
|
||||||
|
# The plugin manager for kubectl
|
||||||
|
krew_enabled: false
|
||||||
|
krew_root_dir: "/usr/local/krew"
|
350
inventory/c12s-sample/group_vars/k8s_cluster/k8s-cluster.yml
Normal file
350
inventory/c12s-sample/group_vars/k8s_cluster/k8s-cluster.yml
Normal file
|
@ -0,0 +1,350 @@
|
||||||
|
---
|
||||||
|
# Kubernetes configuration dirs and system namespace.
|
||||||
|
# Those are where all the additional config stuff goes
|
||||||
|
# the kubernetes normally puts in /srv/kubernetes.
|
||||||
|
# This puts them in a sane location and namespace.
|
||||||
|
# Editing those values will almost surely break something.
|
||||||
|
kube_config_dir: /etc/kubernetes
|
||||||
|
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
|
||||||
|
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
|
||||||
|
|
||||||
|
# This is where all the cert scripts and certs will be located
|
||||||
|
kube_cert_dir: "{{ kube_config_dir }}/ssl"
|
||||||
|
|
||||||
|
# This is where all of the bearer tokens will be stored
|
||||||
|
kube_token_dir: "{{ kube_config_dir }}/tokens"
|
||||||
|
|
||||||
|
kube_api_anonymous_auth: true
|
||||||
|
|
||||||
|
## Change this to use another Kubernetes version, e.g. a current beta release
|
||||||
|
kube_version: v1.25.5
|
||||||
|
|
||||||
|
# Where the binaries will be downloaded.
|
||||||
|
# Note: ensure that you've enough disk space (about 1G)
|
||||||
|
local_release_dir: "/tmp/releases"
|
||||||
|
# Random shifts for retrying failed ops like pushing/downloading
|
||||||
|
retry_stagger: 5
|
||||||
|
|
||||||
|
# This is the user that owns the cluster installation.
|
||||||
|
kube_owner: kube
|
||||||
|
|
||||||
|
# This is the group that the cert creation scripts chgrp the
|
||||||
|
# cert files to. Not really changeable...
|
||||||
|
kube_cert_group: kube-cert
|
||||||
|
|
||||||
|
# Cluster Loglevel configuration
|
||||||
|
kube_log_level: 2
|
||||||
|
|
||||||
|
# Directory where credentials will be stored
|
||||||
|
credentials_dir: "{{ inventory_dir }}/credentials"
|
||||||
|
|
||||||
|
## It is possible to activate / deactivate selected authentication methods (oidc, static token auth)
|
||||||
|
# kube_oidc_auth: false
|
||||||
|
# kube_token_auth: false
|
||||||
|
|
||||||
|
|
||||||
|
## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
|
||||||
|
## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...)
|
||||||
|
|
||||||
|
# kube_oidc_url: https:// ...
|
||||||
|
# kube_oidc_client_id: kubernetes
|
||||||
|
## Optional settings for OIDC
|
||||||
|
# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem"
|
||||||
|
# kube_oidc_username_claim: sub
|
||||||
|
# kube_oidc_username_prefix: 'oidc:'
|
||||||
|
# kube_oidc_groups_claim: groups
|
||||||
|
# kube_oidc_groups_prefix: 'oidc:'
|
||||||
|
|
||||||
|
## Variables to control webhook authn/authz
|
||||||
|
# kube_webhook_token_auth: false
|
||||||
|
# kube_webhook_token_auth_url: https://...
|
||||||
|
# kube_webhook_token_auth_url_skip_tls_verify: false
|
||||||
|
|
||||||
|
## For webhook authorization, authorization_modes must include Webhook
|
||||||
|
# kube_webhook_authorization: false
|
||||||
|
# kube_webhook_authorization_url: https://...
|
||||||
|
# kube_webhook_authorization_url_skip_tls_verify: false
|
||||||
|
|
||||||
|
# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin)
|
||||||
|
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
|
||||||
|
kube_network_plugin: calico
|
||||||
|
|
||||||
|
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
|
||||||
|
kube_network_plugin_multus: false
|
||||||
|
|
||||||
|
# Kubernetes internal network for services, unused block of space.
|
||||||
|
kube_service_addresses: 10.233.0.0/18
|
||||||
|
|
||||||
|
# internal network. When used, it will assign IP
|
||||||
|
# addresses from this range to individual pods.
|
||||||
|
# This network must be unused in your network infrastructure!
|
||||||
|
kube_pods_subnet: 10.233.64.0/18
|
||||||
|
|
||||||
|
# internal network node size allocation (optional). This is the size allocated
|
||||||
|
# to each node for pod IP address allocation. Note that the number of pods per node is
|
||||||
|
# also limited by the kubelet_max_pods variable which defaults to 110.
|
||||||
|
#
|
||||||
|
# Example:
|
||||||
|
# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node:
|
||||||
|
# - kube_pods_subnet: 10.233.64.0/18
|
||||||
|
# - kube_network_node_prefix: 24
|
||||||
|
# - kubelet_max_pods: 110
|
||||||
|
#
|
||||||
|
# Example:
|
||||||
|
# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node:
|
||||||
|
# - kube_pods_subnet: 10.233.64.0/18
|
||||||
|
# - kube_network_node_prefix: 25
|
||||||
|
# - kubelet_max_pods: 110
|
||||||
|
kube_network_node_prefix: 24
|
||||||
|
|
||||||
|
# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
|
||||||
|
enable_dual_stack_networks: false
|
||||||
|
|
||||||
|
# Kubernetes internal network for IPv6 services, unused block of space.
|
||||||
|
# This is only used if enable_dual_stack_networks is set to true
|
||||||
|
# This provides 4096 IPv6 IPs
|
||||||
|
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
|
||||||
|
|
||||||
|
# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
|
||||||
|
# This network must not already be in your network infrastructure!
|
||||||
|
# This is only used if enable_dual_stack_networks is set to true.
|
||||||
|
# This provides room for 256 nodes with 254 pods per node.
|
||||||
|
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
|
||||||
|
|
||||||
|
# IPv6 subnet size allocated to each for pods.
|
||||||
|
# This is only used if enable_dual_stack_networks is set to true
|
||||||
|
# This provides room for 254 pods per node.
|
||||||
|
kube_network_node_prefix_ipv6: 120
|
||||||
|
|
||||||
|
# The port the API Server will be listening on.
|
||||||
|
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
|
||||||
|
kube_apiserver_port: 6443 # (https)
|
||||||
|
|
||||||
|
# Kube-proxy proxyMode configuration.
|
||||||
|
# Can be ipvs, iptables
|
||||||
|
kube_proxy_mode: ipvs
|
||||||
|
|
||||||
|
# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
|
||||||
|
# must be set to true for MetalLB, kube-vip(ARP enabled) to work
|
||||||
|
kube_proxy_strict_arp: true
|
||||||
|
|
||||||
|
# A string slice of values which specify the addresses to use for NodePorts.
|
||||||
|
# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
|
||||||
|
# The default empty string slice ([]) means to use all local addresses.
|
||||||
|
# kube_proxy_nodeport_addresses_cidr is retained for legacy config
|
||||||
|
kube_proxy_nodeport_addresses: >-
|
||||||
|
{%- if kube_proxy_nodeport_addresses_cidr is defined -%}
|
||||||
|
[{{ kube_proxy_nodeport_addresses_cidr }}]
|
||||||
|
{%- else -%}
|
||||||
|
[]
|
||||||
|
{%- endif -%}
|
||||||
|
|
||||||
|
# If non-empty, will use this string as identification instead of the actual hostname
|
||||||
|
# kube_override_hostname: >-
|
||||||
|
# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
|
||||||
|
# {%- else -%}
|
||||||
|
# {{ inventory_hostname }}
|
||||||
|
# {%- endif -%}
|
||||||
|
|
||||||
|
## Encrypting Secret Data at Rest
|
||||||
|
kube_encrypt_secret_data: false
|
||||||
|
|
||||||
|
# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/
|
||||||
|
# kubelet_shutdown_grace_period had to be greater than kubelet_shutdown_grace_period_critical_pods to allow
|
||||||
|
# non-critical pods to also terminate gracefully
|
||||||
|
# kubelet_shutdown_grace_period: 60s
|
||||||
|
# kubelet_shutdown_grace_period_critical_pods: 20s
|
||||||
|
|
||||||
|
# DNS configuration.
|
||||||
|
# Kubernetes cluster name, also will be used as DNS domain
|
||||||
|
cluster_name: distrilab.org
|
||||||
|
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
|
||||||
|
ndots: 2
|
||||||
|
# dns_timeout: 2
|
||||||
|
# dns_attempts: 2
|
||||||
|
# Custom search domains to be added in addition to the default cluster search domains
|
||||||
|
# searchdomains:
|
||||||
|
# - svc.{{ cluster_name }}
|
||||||
|
# - default.svc.{{ cluster_name }}
|
||||||
|
# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
|
||||||
|
# remove_default_searchdomains: false
|
||||||
|
# Can be coredns, coredns_dual, manual or none
|
||||||
|
dns_mode: coredns
|
||||||
|
# Set manual server if using a custom cluster DNS server
|
||||||
|
# manual_dns_server: 10.x.x.x
|
||||||
|
# Enable nodelocal dns cache
|
||||||
|
enable_nodelocaldns: true
|
||||||
|
enable_nodelocaldns_secondary: false
|
||||||
|
nodelocaldns_ip: 169.254.25.10
|
||||||
|
nodelocaldns_health_port: 9254
|
||||||
|
nodelocaldns_second_health_port: 9256
|
||||||
|
nodelocaldns_bind_metrics_host_ip: false
|
||||||
|
nodelocaldns_secondary_skew_seconds: 5
|
||||||
|
# nodelocaldns_external_zones:
|
||||||
|
# - zones:
|
||||||
|
# - example.com
|
||||||
|
# - example.io:1053
|
||||||
|
# nameservers:
|
||||||
|
# - 1.1.1.1
|
||||||
|
# - 2.2.2.2
|
||||||
|
# cache: 5
|
||||||
|
# - zones:
|
||||||
|
# - https://mycompany.local:4453
|
||||||
|
# nameservers:
|
||||||
|
# - 192.168.0.53
|
||||||
|
# cache: 0
|
||||||
|
# - zones:
|
||||||
|
# - mydomain.tld
|
||||||
|
# nameservers:
|
||||||
|
# - 10.233.0.3
|
||||||
|
# cache: 5
|
||||||
|
# rewrite:
|
||||||
|
# - name website.tld website.namespace.svc.cluster.local
|
||||||
|
# Enable k8s_external plugin for CoreDNS
|
||||||
|
enable_coredns_k8s_external: false
|
||||||
|
coredns_k8s_external_zone: k8s_external.local
|
||||||
|
# Enable endpoint_pod_names option for kubernetes plugin
|
||||||
|
enable_coredns_k8s_endpoint_pod_names: false
|
||||||
|
# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config
|
||||||
|
# dns_upstream_forward_extra_opts:
|
||||||
|
# policy: sequential
|
||||||
|
|
||||||
|
# Can be docker_dns, host_resolvconf or none
|
||||||
|
resolvconf_mode: host_resolvconf
|
||||||
|
# Deploy netchecker app to verify DNS resolve as an HTTP service
|
||||||
|
deploy_netchecker: false
|
||||||
|
# Ip address of the kubernetes skydns service
|
||||||
|
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
|
||||||
|
skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
|
||||||
|
dns_domain: "{{ cluster_name }}"
|
||||||
|
|
||||||
|
## Container runtime
|
||||||
|
## docker for docker, crio for cri-o and containerd for containerd.
|
||||||
|
## Default: containerd
|
||||||
|
container_manager: containerd
|
||||||
|
|
||||||
|
# Additional container runtimes
|
||||||
|
kata_containers_enabled: false
|
||||||
|
|
||||||
|
kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"
|
||||||
|
|
||||||
|
# K8s image pull policy (imagePullPolicy)
|
||||||
|
k8s_image_pull_policy: IfNotPresent
|
||||||
|
|
||||||
|
# audit log for kubernetes
|
||||||
|
kubernetes_audit: false
|
||||||
|
|
||||||
|
# define kubelet config dir for dynamic kubelet
|
||||||
|
# kubelet_config_dir:
|
||||||
|
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
|
||||||
|
|
||||||
|
# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
|
||||||
|
podsecuritypolicy_enabled: false
|
||||||
|
|
||||||
|
# Custom PodSecurityPolicySpec for restricted policy
|
||||||
|
# podsecuritypolicy_restricted_spec: {}
|
||||||
|
|
||||||
|
# Custom PodSecurityPolicySpec for privileged policy
|
||||||
|
# podsecuritypolicy_privileged_spec: {}
|
||||||
|
|
||||||
|
# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
|
||||||
|
kubeconfig_localhost: true
|
||||||
|
# Use ansible_host as external api ip when copying over kubeconfig.
|
||||||
|
# kubeconfig_localhost_ansible_host: false
|
||||||
|
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
|
||||||
|
# kubectl_localhost: false
|
||||||
|
|
||||||
|
# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
|
||||||
|
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
|
||||||
|
# kubelet_enforce_node_allocatable: pods
|
||||||
|
|
||||||
|
## Optionally reserve resources for OS system daemons.
|
||||||
|
# system_reserved: true
|
||||||
|
## Uncomment to override default values
|
||||||
|
# system_memory_reserved: 512Mi
|
||||||
|
# system_cpu_reserved: 500m
|
||||||
|
# system_ephemeral_storage_reserved: 2Gi
|
||||||
|
## Reservation for master hosts
|
||||||
|
# system_master_memory_reserved: 256Mi
|
||||||
|
# system_master_cpu_reserved: 250m
|
||||||
|
# system_master_ephemeral_storage_reserved: 2Gi
|
||||||
|
|
||||||
|
## Eviction Thresholds to avoid system OOMs
|
||||||
|
# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds
|
||||||
|
# eviction_hard: {}
|
||||||
|
# eviction_hard_control_plane: {}
|
||||||
|
|
||||||
|
# An alternative flexvolume plugin directory
|
||||||
|
# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
|
||||||
|
|
||||||
|
## Supplementary addresses that can be added in kubernetes ssl keys.
|
||||||
|
## That can be useful for example to setup a keepalived virtual IP
|
||||||
|
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
|
||||||
|
|
||||||
|
## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
|
||||||
|
## See https://github.com/kubernetes-sigs/kubespray/issues/2141
|
||||||
|
## Set this variable to true to get rid of this issue
|
||||||
|
volume_cross_zone_attachment: false
|
||||||
|
## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI,
|
||||||
|
## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI)
|
||||||
|
persistent_volumes_enabled: false
|
||||||
|
|
||||||
|
## Container Engine Acceleration
|
||||||
|
## Enable container acceleration feature, for example use gpu acceleration in containers
|
||||||
|
# nvidia_accelerator_enabled: true
|
||||||
|
## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset.
|
||||||
|
## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
|
||||||
|
## Array with nvidia_gpu_nodes, leave empty or comment if you don't want to install drivers.
|
||||||
|
## Labels and taints won't be set to nodes if they are not in the array.
|
||||||
|
# nvidia_gpu_nodes:
|
||||||
|
# - kube-gpu-001
|
||||||
|
# nvidia_driver_version: "384.111"
|
||||||
|
## flavor can be tesla or gtx
|
||||||
|
# nvidia_gpu_flavor: gtx
|
||||||
|
## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io.
|
||||||
|
# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2
|
||||||
|
# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63
|
||||||
|
## NVIDIA GPU device plugin image.
|
||||||
|
# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
|
||||||
|
|
||||||
|
## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
|
||||||
|
# tls_min_version: ""
|
||||||
|
|
||||||
|
## Support tls cipher suites.
|
||||||
|
# tls_cipher_suites: {}
|
||||||
|
# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
|
||||||
|
# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
|
||||||
|
# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
|
||||||
|
# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
|
||||||
|
# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
|
||||||
|
# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
|
||||||
|
# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
|
||||||
|
# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
|
||||||
|
# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
|
||||||
|
# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
|
||||||
|
# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
|
||||||
|
# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
|
||||||
|
# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
|
||||||
|
# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
|
||||||
|
# - TLS_ECDHE_RSA_WITH_RC4_128_SHA
|
||||||
|
# - TLS_RSA_WITH_3DES_EDE_CBC_SHA
|
||||||
|
# - TLS_RSA_WITH_AES_128_CBC_SHA
|
||||||
|
# - TLS_RSA_WITH_AES_128_CBC_SHA256
|
||||||
|
# - TLS_RSA_WITH_AES_128_GCM_SHA256
|
||||||
|
# - TLS_RSA_WITH_AES_256_CBC_SHA
|
||||||
|
# - TLS_RSA_WITH_AES_256_GCM_SHA384
|
||||||
|
# - TLS_RSA_WITH_RC4_128_SHA
|
||||||
|
|
||||||
|
## Amount of time to retain events. (default 1h0m0s)
|
||||||
|
event_ttl_duration: "1h0m0s"
|
||||||
|
|
||||||
|
## Automatically renew K8S control plane certificates on first Monday of each month
|
||||||
|
auto_renew_certificates: false
|
||||||
|
# First Monday of each month
|
||||||
|
# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00"
|
||||||
|
|
||||||
|
# kubeadm patches path
|
||||||
|
kubeadm_patches:
|
||||||
|
enabled: false
|
||||||
|
source_dir: "{{ inventory_dir }}/patches"
|
||||||
|
dest_dir: "{{ kube_config_dir }}/patches"
|
131
inventory/c12s-sample/group_vars/k8s_cluster/k8s-net-calico.yml
Normal file
131
inventory/c12s-sample/group_vars/k8s_cluster/k8s-net-calico.yml
Normal file
|
@ -0,0 +1,131 @@
|
||||||
|
---
|
||||||
|
# see roles/network_plugin/calico/defaults/main.yml
|
||||||
|
|
||||||
|
# the default value of name
|
||||||
|
calico_cni_name: k8s-pod-network
|
||||||
|
|
||||||
|
## With calico it is possible to distribute routes to border routers of the datacenter.
|
||||||
|
## Warning : enabling router peering will disable calico's default behavior ('node mesh').
|
||||||
|
## The subnets of each nodes will be distributed by the datacenter router
|
||||||
|
# peer_with_router: false
|
||||||
|
|
||||||
|
# Enables Internet connectivity from containers
|
||||||
|
# nat_outgoing: true
|
||||||
|
|
||||||
|
# Enables Calico CNI "host-local" IPAM plugin
|
||||||
|
# calico_ipam_host_local: true
|
||||||
|
|
||||||
|
# add default ippool name
|
||||||
|
# calico_pool_name: "default-pool"
|
||||||
|
|
||||||
|
# add default ippool blockSize (defaults kube_network_node_prefix)
|
||||||
|
calico_pool_blocksize: 26
|
||||||
|
|
||||||
|
# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
|
||||||
|
# calico_pool_cidr: 1.2.3.4/5
|
||||||
|
|
||||||
|
# add default ippool CIDR to CNI config
|
||||||
|
# calico_cni_pool: true
|
||||||
|
|
||||||
|
# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set.
|
||||||
|
# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
|
||||||
|
|
||||||
|
# Add default IPV6 IPPool CIDR to CNI config
|
||||||
|
# calico_cni_pool_ipv6: true
|
||||||
|
|
||||||
|
# Global as_num (/calico/bgp/v1/global/as_num)
|
||||||
|
# global_as_num: "64512"
|
||||||
|
|
||||||
|
# If doing peering with node-assigned asn where the global AS number does not match your nodes, you want this
|
||||||
|
# to be true. All other cases, false.
|
||||||
|
# calico_no_global_as_num: false
|
||||||
|
|
||||||
|
# You can set MTU value here. If left undefined or empty, it will
|
||||||
|
# not be specified in calico CNI config, so Calico will use built-in
|
||||||
|
# defaults. The value should be a number, not a string.
|
||||||
|
# calico_mtu: 1500
|
||||||
|
|
||||||
|
# Configure the MTU to use for workload interfaces and tunnels.
|
||||||
|
# - If Wireguard is enabled, subtract 60 from your network MTU (i.e 1500-60=1440)
|
||||||
|
# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450)
|
||||||
|
# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480)
|
||||||
|
# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500)
|
||||||
|
# calico_veth_mtu: 1440
|
||||||
|
|
||||||
|
# Advertise Cluster IPs
|
||||||
|
# calico_advertise_cluster_ips: true
|
||||||
|
|
||||||
|
# Advertise Service External IPs
|
||||||
|
# calico_advertise_service_external_ips:
|
||||||
|
# - x.x.x.x/24
|
||||||
|
# - y.y.y.y/32
|
||||||
|
|
||||||
|
# Advertise Service LoadBalancer IPs
|
||||||
|
# calico_advertise_service_loadbalancer_ips:
|
||||||
|
# - x.x.x.x/24
|
||||||
|
# - y.y.y.y/16
|
||||||
|
|
||||||
|
# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
|
||||||
|
# calico_datastore: "kdd"
|
||||||
|
|
||||||
|
# Choose Calico iptables backend: "Legacy", "Auto" or "NFT"
|
||||||
|
# calico_iptables_backend: "Auto"
|
||||||
|
|
||||||
|
# Use typha (only with kdd)
|
||||||
|
# typha_enabled: false
|
||||||
|
|
||||||
|
# Generate TLS certs for secure typha<->calico-node communication
|
||||||
|
# typha_secure: false
|
||||||
|
|
||||||
|
# Scaling typha: 1 replica per 100 nodes is adequate
|
||||||
|
# Number of typha replicas
|
||||||
|
# typha_replicas: 1
|
||||||
|
|
||||||
|
# Set max typha connections
|
||||||
|
# typha_max_connections_lower_limit: 300
|
||||||
|
|
||||||
|
# Set calico network backend: "bird", "vxlan" or "none"
|
||||||
|
# bird enable BGP routing, required for ipip and no encapsulation modes
|
||||||
|
# calico_network_backend: vxlan
|
||||||
|
|
||||||
|
# IP in IP and VXLAN are mutually exclusive modes.
|
||||||
|
# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never"
|
||||||
|
# calico_ipip_mode: 'Never'
|
||||||
|
|
||||||
|
# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never"
|
||||||
|
# calico_vxlan_mode: 'Always'
|
||||||
|
|
||||||
|
# set VXLAN port and VNI
|
||||||
|
# calico_vxlan_vni: 4096
|
||||||
|
# calico_vxlan_port: 4789
|
||||||
|
|
||||||
|
# Enable eBPF mode
|
||||||
|
# calico_bpf_enabled: false
|
||||||
|
|
||||||
|
# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of:
|
||||||
|
# * can-reach=DESTINATION
|
||||||
|
# * interface=INTERFACE-REGEX
|
||||||
|
# see https://docs.projectcalico.org/reference/node/configuration
|
||||||
|
# calico_ip_auto_method: "interface=eth.*"
|
||||||
|
# calico_ip6_auto_method: "interface=eth.*"
|
||||||
|
|
||||||
|
# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the host’s interface for MTU auto-detection.
|
||||||
|
# see https://projectcalico.docs.tigera.io/reference/felix/configuration
|
||||||
|
# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
|
||||||
|
|
||||||
|
# Choose the iptables insert mode for Calico: "Insert" or "Append".
|
||||||
|
# calico_felix_chaininsertmode: Insert
|
||||||
|
|
||||||
|
# If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2)
|
||||||
|
# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS
|
||||||
|
# calico_use_default_route_src_ipaddr: false
|
||||||
|
|
||||||
|
# Enable calico traffic encryption with wireguard
|
||||||
|
# calico_wireguard_enabled: false
|
||||||
|
|
||||||
|
# Under certain situations liveness and readiness probes may need tuning
|
||||||
|
# calico_node_livenessprobe_timeout: 10
|
||||||
|
# calico_node_readinessprobe_timeout: 10
|
||||||
|
|
||||||
|
# Calico apiserver (only with kdd)
|
||||||
|
# calico_apiserver_enabled: false
|
|
@ -0,0 +1,10 @@
|
||||||
|
# see roles/network_plugin/canal/defaults/main.yml
|
||||||
|
|
||||||
|
# The interface used by canal for host <-> host communication.
|
||||||
|
# If left blank, then the interface is chosen using the node's
|
||||||
|
# default route.
|
||||||
|
# canal_iface: ""
|
||||||
|
|
||||||
|
# Whether or not to masquerade traffic to destinations not within
|
||||||
|
# the pod network.
|
||||||
|
# canal_masquerade: "true"
|
245
inventory/c12s-sample/group_vars/k8s_cluster/k8s-net-cilium.yml
Normal file
245
inventory/c12s-sample/group_vars/k8s_cluster/k8s-net-cilium.yml
Normal file
|
@ -0,0 +1,245 @@
|
||||||
|
---
|
||||||
|
# cilium_version: "v1.12.1"
|
||||||
|
|
||||||
|
# Log-level
|
||||||
|
# cilium_debug: false
|
||||||
|
|
||||||
|
# cilium_mtu: ""
|
||||||
|
# cilium_enable_ipv4: true
|
||||||
|
# cilium_enable_ipv6: false
|
||||||
|
|
||||||
|
# Cilium agent health port
|
||||||
|
# cilium_agent_health_port: "9879"
|
||||||
|
|
||||||
|
# Identity allocation mode selects how identities are shared between cilium
|
||||||
|
# nodes by setting how they are stored. The options are "crd" or "kvstore".
|
||||||
|
# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
|
||||||
|
# These can be queried with:
|
||||||
|
# `kubectl get ciliumid`
|
||||||
|
# - "kvstore" stores identities in an etcd kvstore.
|
||||||
|
# - In order to support External Workloads, "crd" is required
|
||||||
|
# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta
|
||||||
|
# - KVStore operations are only required when cilium-operator is running with any of the below options:
|
||||||
|
# - --synchronize-k8s-services
|
||||||
|
# - --synchronize-k8s-nodes
|
||||||
|
# - --identity-allocation-mode=kvstore
|
||||||
|
# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations
|
||||||
|
# cilium_identity_allocation_mode: kvstore
|
||||||
|
|
||||||
|
# Etcd SSL dirs
|
||||||
|
# cilium_cert_dir: /etc/cilium/certs
|
||||||
|
# kube_etcd_cacert_file: ca.pem
|
||||||
|
# kube_etcd_cert_file: cert.pem
|
||||||
|
# kube_etcd_key_file: cert-key.pem
|
||||||
|
|
||||||
|
# Limits for apps
|
||||||
|
# cilium_memory_limit: 500M
|
||||||
|
# cilium_cpu_limit: 500m
|
||||||
|
# cilium_memory_requests: 64M
|
||||||
|
# cilium_cpu_requests: 100m
|
||||||
|
|
||||||
|
# Overlay Network Mode
|
||||||
|
# cilium_tunnel_mode: vxlan
|
||||||
|
# Optional features
|
||||||
|
# cilium_enable_prometheus: false
|
||||||
|
# Enable if you want to make use of hostPort mappings
|
||||||
|
# cilium_enable_portmap: false
|
||||||
|
# Monitor aggregation level (none/low/medium/maximum)
|
||||||
|
# cilium_monitor_aggregation: medium
|
||||||
|
# The monitor aggregation flags determine which TCP flags, upon the
|
||||||
|
# first observation, cause monitor notifications to be generated.
|
||||||
|
#
|
||||||
|
# Only effective when monitor aggregation is set to "medium" or higher.
|
||||||
|
# cilium_monitor_aggregation_flags: "all"
|
||||||
|
# Kube Proxy Replacement mode (strict/probe/partial)
|
||||||
|
# cilium_kube_proxy_replacement: probe
|
||||||
|
|
||||||
|
# If upgrading from Cilium < 1.5, you may want to override some of these options
|
||||||
|
# to prevent service disruptions. See also:
|
||||||
|
# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
|
||||||
|
# cilium_preallocate_bpf_maps: false
|
||||||
|
|
||||||
|
# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9
|
||||||
|
# cilium_tofqdns_enable_poller: false
|
||||||
|
|
||||||
|
# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9
|
||||||
|
# cilium_enable_legacy_services: false
|
||||||
|
|
||||||
|
# Unique ID of the cluster. Must be unique across all connected clusters and
|
||||||
|
# in the range of 1 and 255. Only relevant when building a mesh of clusters.
|
||||||
|
# This value is not defined by default
|
||||||
|
# cilium_cluster_id:
|
||||||
|
|
||||||
|
# Deploy cilium even if kube_network_plugin is not cilium.
|
||||||
|
# This enables to deploy cilium alongside another CNI to replace kube-proxy.
|
||||||
|
# cilium_deploy_additionally: false
|
||||||
|
|
||||||
|
# Auto direct nodes routes can be used to advertise pods routes in your cluster
|
||||||
|
# without any tunnelling (with `cilium_tunnel_mode` sets to `disabled`).
|
||||||
|
# This works only if you have a L2 connectivity between all your nodes.
|
||||||
|
# You will also have to specify the variable `cilium_native_routing_cidr` to
|
||||||
|
# make this work. Please refer to the cilium documentation for more
|
||||||
|
# information about this kind of setups.
|
||||||
|
# cilium_auto_direct_node_routes: false
|
||||||
|
|
||||||
|
# Allows to explicitly specify the IPv4 CIDR for native routing.
|
||||||
|
# When specified, Cilium assumes networking for this CIDR is preconfigured and
|
||||||
|
# hands traffic destined for that range to the Linux network stack without
|
||||||
|
# applying any SNAT.
|
||||||
|
# Generally speaking, specifying a native routing CIDR implies that Cilium can
|
||||||
|
# depend on the underlying networking stack to route packets to their
|
||||||
|
# destination. To offer a concrete example, if Cilium is configured to use
|
||||||
|
# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
|
||||||
|
# the user must configure the routes to reach pods, either manually or by
|
||||||
|
# setting the auto-direct-node-routes flag.
|
||||||
|
# cilium_native_routing_cidr: ""
|
||||||
|
|
||||||
|
# Allows to explicitly specify the IPv6 CIDR for native routing.
|
||||||
|
# cilium_native_routing_cidr_ipv6: ""
|
||||||
|
|
||||||
|
# Enable transparent network encryption.
|
||||||
|
# cilium_encryption_enabled: false
|
||||||
|
|
||||||
|
# Encryption method. Can be either ipsec or wireguard.
|
||||||
|
# Only effective when `cilium_encryption_enabled` is set to true.
|
||||||
|
# cilium_encryption_type: "ipsec"
|
||||||
|
|
||||||
|
# Enable encryption for pure node to node traffic.
|
||||||
|
# This option is only effective when `cilium_encryption_type` is set to `ipsec`.
|
||||||
|
# cilium_ipsec_node_encryption: false
|
||||||
|
|
||||||
|
# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation.
|
||||||
|
# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard,
|
||||||
|
# it will fallback on the wireguard-go user-space implementation of WireGuard.
|
||||||
|
# This option is only effective when `cilium_encryption_type` is set to `wireguard`.
|
||||||
|
# cilium_wireguard_userspace_fallback: false
|
||||||
|
|
||||||
|
# IP Masquerade Agent
|
||||||
|
# https://docs.cilium.io/en/stable/concepts/networking/masquerading/
|
||||||
|
# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded
|
||||||
|
# cilium_ip_masq_agent_enable: false
|
||||||
|
|
||||||
|
### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded
|
||||||
|
# cilium_non_masquerade_cidrs:
|
||||||
|
# - 10.0.0.0/8
|
||||||
|
# - 172.16.0.0/12
|
||||||
|
# - 192.168.0.0/16
|
||||||
|
# - 100.64.0.0/10
|
||||||
|
# - 192.0.0.0/24
|
||||||
|
# - 192.0.2.0/24
|
||||||
|
# - 192.88.99.0/24
|
||||||
|
# - 198.18.0.0/15
|
||||||
|
# - 198.51.100.0/24
|
||||||
|
# - 203.0.113.0/24
|
||||||
|
# - 240.0.0.0/4
|
||||||
|
### Indicates whether to masquerade traffic to the link local prefix.
|
||||||
|
### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list.
|
||||||
|
# cilium_masq_link_local: false
|
||||||
|
### A time interval at which the agent attempts to reload config from disk
|
||||||
|
# cilium_ip_masq_resync_interval: 60s
|
||||||
|
|
||||||
|
# Hubble
|
||||||
|
### Enable Hubble without install
|
||||||
|
# cilium_enable_hubble: false
|
||||||
|
### Enable Hubble Metrics
|
||||||
|
# cilium_enable_hubble_metrics: false
|
||||||
|
### if cilium_enable_hubble_metrics: true
|
||||||
|
# cilium_hubble_metrics: {}
|
||||||
|
# - dns
|
||||||
|
# - drop
|
||||||
|
# - tcp
|
||||||
|
# - flow
|
||||||
|
# - icmp
|
||||||
|
# - http
|
||||||
|
### Enable Hubble install
|
||||||
|
# cilium_hubble_install: false
|
||||||
|
### Enable auto generate certs if cilium_hubble_install: true
|
||||||
|
# cilium_hubble_tls_generate: false
|
||||||
|
|
||||||
|
# IP address management mode for v1.9+.
|
||||||
|
# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/
|
||||||
|
# cilium_ipam_mode: kubernetes
|
||||||
|
|
||||||
|
# Extra arguments for the Cilium agent
|
||||||
|
# cilium_agent_custom_args: []
|
||||||
|
|
||||||
|
# For adding and mounting extra volumes to the cilium agent
|
||||||
|
# cilium_agent_extra_volumes: []
|
||||||
|
# cilium_agent_extra_volume_mounts: []
|
||||||
|
|
||||||
|
# cilium_agent_extra_env_vars: []
|
||||||
|
|
||||||
|
# cilium_operator_replicas: 2
|
||||||
|
|
||||||
|
# The address at which the cillium operator bind health check api
|
||||||
|
# cilium_operator_api_serve_addr: "127.0.0.1:9234"
|
||||||
|
|
||||||
|
## A dictionary of extra config variables to add to cilium-config, formatted like:
|
||||||
|
## cilium_config_extra_vars:
|
||||||
|
## var1: "value1"
|
||||||
|
## var2: "value2"
|
||||||
|
# cilium_config_extra_vars: {}
|
||||||
|
|
||||||
|
# For adding and mounting extra volumes to the cilium operator
|
||||||
|
# cilium_operator_extra_volumes: []
|
||||||
|
# cilium_operator_extra_volume_mounts: []
|
||||||
|
|
||||||
|
# Extra arguments for the Cilium Operator
|
||||||
|
# cilium_operator_custom_args: []
|
||||||
|
|
||||||
|
# Name of the cluster. Only relevant when building a mesh of clusters.
|
||||||
|
# cilium_cluster_name: default
|
||||||
|
|
||||||
|
# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
|
||||||
|
# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime.
|
||||||
|
# Available for Cilium v1.10 and up.
|
||||||
|
# cilium_cni_exclusive: true
|
||||||
|
|
||||||
|
# Configure the log file for CNI logging with retention policy of 7 days.
|
||||||
|
# Disable CNI file logging by setting this field to empty explicitly.
|
||||||
|
# Available for Cilium v1.12 and up.
|
||||||
|
# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log"
|
||||||
|
|
||||||
|
# -- Configure cgroup related configuration
|
||||||
|
# -- Enable auto mount of cgroup2 filesystem.
|
||||||
|
# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at
|
||||||
|
# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod.
|
||||||
|
# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted
|
||||||
|
# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the
|
||||||
|
# volume will be mounted inside the cilium agent pod at the same path.
|
||||||
|
# Available for Cilium v1.11 and up
|
||||||
|
# cilium_cgroup_auto_mount: true
|
||||||
|
# -- Configure cgroup root where cgroup2 filesystem is mounted on the host
|
||||||
|
# cilium_cgroup_host_root: "/run/cilium/cgroupv2"
|
||||||
|
|
||||||
|
# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
|
||||||
|
# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
|
||||||
|
# cilium_bpf_map_dynamic_size_ratio: "0.0"
|
||||||
|
|
||||||
|
# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
|
||||||
|
# Available for Cilium v1.10 and up
|
||||||
|
# cilium_enable_ipv4_masquerade: true
|
||||||
|
# -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
|
||||||
|
# Available for Cilium v1.10 and up
|
||||||
|
# cilium_enable_ipv6_masquerade: true
|
||||||
|
|
||||||
|
# -- Enable native IP masquerade support in eBPF
|
||||||
|
# cilium_enable_bpf_masquerade: false
|
||||||
|
|
||||||
|
# -- Configure whether direct routing mode should route traffic via
|
||||||
|
# host stack (true) or directly and more efficiently out of BPF (false) if
|
||||||
|
# the kernel supports it. The latter has the implication that it will also
|
||||||
|
# bypass netfilter in the host namespace.
|
||||||
|
# cilium_enable_host_legacy_routing: true
|
||||||
|
|
||||||
|
# -- Enable use of the remote node identity.
|
||||||
|
# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
|
||||||
|
# cilium_enable_remote_node_identity: true
|
||||||
|
|
||||||
|
# -- Enable the use of well-known identities.
|
||||||
|
# cilium_enable_well_known_identities: false
|
||||||
|
|
||||||
|
# cilium_enable_bpf_clock_probe: true
|
||||||
|
|
||||||
|
# -- Whether to enable CNP status updates.
|
||||||
|
# cilium_disable_cnp_status_updates: true
|
|
@ -0,0 +1,18 @@
|
||||||
|
# see roles/network_plugin/flannel/defaults/main.yml
|
||||||
|
|
||||||
|
## interface that should be used for flannel operations
|
||||||
|
## This is actually an inventory cluster-level item
|
||||||
|
# flannel_interface:
|
||||||
|
|
||||||
|
## Select interface that should be used for flannel operations by regexp on Name or IP
|
||||||
|
## This is actually an inventory cluster-level item
|
||||||
|
## example: select interface with ip from net 10.0.0.0/23
|
||||||
|
## single quote and escape backslashes
|
||||||
|
# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
|
||||||
|
|
||||||
|
# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard'
|
||||||
|
# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md
|
||||||
|
# flannel_backend_type: "vxlan"
|
||||||
|
# flannel_vxlan_vni: 1
|
||||||
|
# flannel_vxlan_port: 8472
|
||||||
|
# flannel_vxlan_direct_routing: false
|
|
@ -0,0 +1,57 @@
|
||||||
|
---
|
||||||
|
|
||||||
|
# geneve or vlan
|
||||||
|
kube_ovn_network_type: geneve
|
||||||
|
|
||||||
|
# geneve, vxlan or stt. ATTENTION: some networkpolicy cannot take effect when using vxlan and stt need custom compile ovs kernel module
|
||||||
|
kube_ovn_tunnel_type: geneve
|
||||||
|
|
||||||
|
## The nic to support container network can be a nic name or a group of regex separated by comma e.g: 'enp6s0f0,eth.*', if empty will use the nic that the default route use.
|
||||||
|
# kube_ovn_iface: eth1
|
||||||
|
## The MTU used by pod iface in overlay networks (default iface MTU - 100)
|
||||||
|
# kube_ovn_mtu: 1333
|
||||||
|
|
||||||
|
## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bind to the physical port.
|
||||||
|
kube_ovn_hw_offload: false
|
||||||
|
# traffic mirror
|
||||||
|
kube_ovn_traffic_mirror: false
|
||||||
|
|
||||||
|
# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
|
||||||
|
# kube_ovn_default_interface_name: eth0
|
||||||
|
|
||||||
|
kube_ovn_external_address: 8.8.8.8
|
||||||
|
kube_ovn_external_address_ipv6: 2400:3200::1
|
||||||
|
kube_ovn_external_dns: alauda.cn
|
||||||
|
|
||||||
|
# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0
|
||||||
|
kube_ovn_default_gateway_check: true
|
||||||
|
kube_ovn_default_logical_gateway: false
|
||||||
|
# kube_ovn_default_exclude_ips: 10.16.0.1
|
||||||
|
kube_ovn_node_switch_cidr: 100.64.0.0/16
|
||||||
|
kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64
|
||||||
|
|
||||||
|
## vlan config, set default interface name and vlan id
|
||||||
|
# kube_ovn_default_interface_name: eth0
|
||||||
|
kube_ovn_default_vlan_id: 100
|
||||||
|
kube_ovn_vlan_name: product
|
||||||
|
|
||||||
|
## pod nic type, support: veth-pair or internal-port
|
||||||
|
kube_ovn_pod_nic_type: veth_pair
|
||||||
|
|
||||||
|
## Enable load balancer
|
||||||
|
kube_ovn_enable_lb: true
|
||||||
|
|
||||||
|
## Enable network policy support
|
||||||
|
kube_ovn_enable_np: true
|
||||||
|
|
||||||
|
## Enable external vpc support
|
||||||
|
kube_ovn_enable_external_vpc: true
|
||||||
|
|
||||||
|
## Enable checksum
|
||||||
|
kube_ovn_encap_checksum: true
|
||||||
|
|
||||||
|
## enable ssl
|
||||||
|
kube_ovn_enable_ssl: false
|
||||||
|
|
||||||
|
## dpdk
|
||||||
|
kube_ovn_dpdk_enabled: false
|
|
@ -0,0 +1,64 @@
|
||||||
|
# See roles/network_plugin/kube-router//defaults/main.yml
|
||||||
|
|
||||||
|
# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP
|
||||||
|
# kube_router_run_router: true
|
||||||
|
|
||||||
|
# Enables Network Policy -- sets up iptables to provide ingress firewall for pods
|
||||||
|
# kube_router_run_firewall: true
|
||||||
|
|
||||||
|
# Enables Service Proxy -- sets up IPVS for Kubernetes Services
|
||||||
|
# see docs/kube-router.md "Caveats" section
|
||||||
|
# kube_router_run_service_proxy: false
|
||||||
|
|
||||||
|
# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers.
|
||||||
|
# kube_router_advertise_cluster_ip: false
|
||||||
|
|
||||||
|
# Add External IP of service to the RIB so that it gets advertised to the BGP peers.
|
||||||
|
# kube_router_advertise_external_ip: false
|
||||||
|
|
||||||
|
# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers.
|
||||||
|
# kube_router_advertise_loadbalancer_ip: false
|
||||||
|
|
||||||
|
# Adjust manifest of kube-router daemonset template with DSR needed changes
|
||||||
|
# kube_router_enable_dsr: false
|
||||||
|
|
||||||
|
# Array of arbitrary extra arguments to kube-router, see
|
||||||
|
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
|
||||||
|
# kube_router_extra_args: []
|
||||||
|
|
||||||
|
# ASN number of the cluster, used when communicating with external BGP routers
|
||||||
|
# kube_router_cluster_asn: ~
|
||||||
|
|
||||||
|
# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr.
|
||||||
|
# kube_router_peer_router_asns: ~
|
||||||
|
|
||||||
|
# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's.
|
||||||
|
# kube_router_peer_router_ips: ~
|
||||||
|
|
||||||
|
# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used.
|
||||||
|
# kube_router_peer_router_ports: ~
|
||||||
|
|
||||||
|
# Setups node CNI to allow hairpin mode, requires node reboots, see
|
||||||
|
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode
|
||||||
|
# kube_router_support_hairpin_mode: false
|
||||||
|
|
||||||
|
# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc.
|
||||||
|
# kube_router_dns_policy: ClusterFirstWithHostNet
|
||||||
|
|
||||||
|
# Array of annotations for master
|
||||||
|
# kube_router_annotations_master: []
|
||||||
|
|
||||||
|
# Array of annotations for every node
|
||||||
|
# kube_router_annotations_node: []
|
||||||
|
|
||||||
|
# Array of common annotations for every node
|
||||||
|
# kube_router_annotations_all: []
|
||||||
|
|
||||||
|
# Enables scraping kube-router metrics with Prometheus
|
||||||
|
# kube_router_enable_metrics: false
|
||||||
|
|
||||||
|
# Path to serve Prometheus metrics on
|
||||||
|
# kube_router_metrics_path: /metrics
|
||||||
|
|
||||||
|
# Prometheus metrics port to use
|
||||||
|
# kube_router_metrics_port: 9255
|
|
@ -0,0 +1,6 @@
|
||||||
|
---
|
||||||
|
# private interface, on a l2-network
|
||||||
|
macvlan_interface: "eth1"
|
||||||
|
|
||||||
|
# Enable nat in default gateway network interface
|
||||||
|
enable_nat_default_gateway: true
|
|
@ -0,0 +1,64 @@
|
||||||
|
# see roles/network_plugin/weave/defaults/main.yml
|
||||||
|
|
||||||
|
# Weave's network password for encryption, if null then no network encryption.
|
||||||
|
# weave_password: ~
|
||||||
|
|
||||||
|
# If set to 1, disable checking for new Weave Net versions (default is blank,
|
||||||
|
# i.e. check is enabled)
|
||||||
|
# weave_checkpoint_disable: false
|
||||||
|
|
||||||
|
# Soft limit on the number of connections between peers. Defaults to 100.
|
||||||
|
# weave_conn_limit: 100
|
||||||
|
|
||||||
|
# Weave Net defaults to enabling hairpin on the bridge side of the veth pair
|
||||||
|
# for containers attached. If you need to disable hairpin, e.g. your kernel is
|
||||||
|
# one of those that can panic if hairpin is enabled, then you can disable it by
|
||||||
|
# setting `HAIRPIN_MODE=false`.
|
||||||
|
# weave_hairpin_mode: true
|
||||||
|
|
||||||
|
# The range of IP addresses used by Weave Net and the subnet they are placed in
|
||||||
|
# (CIDR format; default 10.32.0.0/12)
|
||||||
|
# weave_ipalloc_range: "{{ kube_pods_subnet }}"
|
||||||
|
|
||||||
|
# Set to 0 to disable Network Policy Controller (default is on)
|
||||||
|
# weave_expect_npc: "{{ enable_network_policy }}"
|
||||||
|
|
||||||
|
# List of addresses of peers in the Kubernetes cluster (default is to fetch the
|
||||||
|
# list from the api-server)
|
||||||
|
# weave_kube_peers: ~
|
||||||
|
|
||||||
|
# Set the initialization mode of the IP Address Manager (defaults to consensus
|
||||||
|
# amongst the KUBE_PEERS)
|
||||||
|
# weave_ipalloc_init: ~
|
||||||
|
|
||||||
|
# Set the IP address used as a gateway from the Weave network to the host
|
||||||
|
# network - this is useful if you are configuring the addon as a static pod.
|
||||||
|
# weave_expose_ip: ~
|
||||||
|
|
||||||
|
# Address and port that the Weave Net daemon will serve Prometheus-style
|
||||||
|
# metrics on (defaults to 0.0.0.0:6782)
|
||||||
|
# weave_metrics_addr: ~
|
||||||
|
|
||||||
|
# Address and port that the Weave Net daemon will serve status requests on
|
||||||
|
# (defaults to disabled)
|
||||||
|
# weave_status_addr: ~
|
||||||
|
|
||||||
|
# Weave Net defaults to 1376 bytes, but you can set a smaller size if your
|
||||||
|
# underlying network has a tighter limit, or set a larger size for better
|
||||||
|
# performance if your network supports jumbo frames (e.g. 8916)
|
||||||
|
# weave_mtu: 1376
|
||||||
|
|
||||||
|
# Set to 1 to preserve the client source IP address when accessing Service
|
||||||
|
# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works
|
||||||
|
# only with Weave IPAM (default).
|
||||||
|
# weave_no_masq_local: true
|
||||||
|
|
||||||
|
# set to nft to use nftables backend for iptables (default is iptables)
|
||||||
|
# weave_iptables_backend: iptables
|
||||||
|
|
||||||
|
# Extra variables that passing to launch.sh, useful for enabling seed mode, see
|
||||||
|
# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
|
||||||
|
# weave_extra_args: ~
|
||||||
|
|
||||||
|
# Extra variables for weave_npc that passing to launch.sh, useful for change log level, ex --log-level=error
|
||||||
|
# weave_npc_extra_args: ~
|
41
inventory/c12s-sample/inventory.ini
Normal file
41
inventory/c12s-sample/inventory.ini
Normal file
|
@ -0,0 +1,41 @@
|
||||||
|
# ## Configure 'ip' variable to bind kubernetes services on a
|
||||||
|
# ## different ip than the default iface
|
||||||
|
# ## We should set etcd_member_name for etcd cluster. The node that is not a etcd member do not need to set the value, or can set the empty string value.
|
||||||
|
[all]
|
||||||
|
vig-k8s-e01 ansible_host=192.168.30.35 etcd_member_name=etcd01
|
||||||
|
vig-k8s-e02 ansible_host=192.168.30.36 etcd_member_name=etcd02
|
||||||
|
vig-k8s-e03 ansible_host=192.168.30.37 etcd_member_name=etcd03
|
||||||
|
vig-k8s-c01 ansible_host=192.168.30.38
|
||||||
|
vig-k8s-c02 ansible_host=192.168.30.39
|
||||||
|
vig-k8s-c03 ansible_host=192.168.30.40
|
||||||
|
vig-k8s-w01 ansible_host=192.168.30.41
|
||||||
|
vig-k8s-w02 ansible_host=192.168.30.42
|
||||||
|
vig-k8s-w03 ansible_host=192.168.30.43
|
||||||
|
|
||||||
|
# node6 ansible_host=95.54.0.17 # ip=10.3.0.6 etcd_member_name=etcd6
|
||||||
|
|
||||||
|
# ## configure a bastion host if your nodes are not directly reachable
|
||||||
|
# [bastion]
|
||||||
|
# bastion ansible_host=x.x.x.x ansible_user=some_user
|
||||||
|
|
||||||
|
[kube_control_plane]
|
||||||
|
vig-k8s-c01
|
||||||
|
vig-k8s-c02
|
||||||
|
vig-k8s-c03
|
||||||
|
|
||||||
|
[etcd]
|
||||||
|
vig-k8s-e01
|
||||||
|
vig-k8s-e02
|
||||||
|
vig-k8s-e03
|
||||||
|
|
||||||
|
[kube_node]
|
||||||
|
vig-k8s-w01
|
||||||
|
vig-k8s-w02
|
||||||
|
vig-k8s-w03
|
||||||
|
|
||||||
|
[calico_rr]
|
||||||
|
|
||||||
|
[k8s_cluster:children]
|
||||||
|
kube_control_plane
|
||||||
|
kube_node
|
||||||
|
calico_rr
|
|
@ -0,0 +1,8 @@
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Pod
|
||||||
|
metadata:
|
||||||
|
name: kube-controller-manager
|
||||||
|
annotations:
|
||||||
|
prometheus.io/scrape: 'true'
|
||||||
|
prometheus.io/port: '10257'
|
8
inventory/c12s-sample/patches/kube-scheduler+merge.yaml
Normal file
8
inventory/c12s-sample/patches/kube-scheduler+merge.yaml
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Pod
|
||||||
|
metadata:
|
||||||
|
name: kube-scheduler
|
||||||
|
annotations:
|
||||||
|
prometheus.io/scrape: 'true'
|
||||||
|
prometheus.io/port: '10259'
|
|
@ -35,6 +35,11 @@ loadbalancer_apiserver_healthcheck_port: 8081
|
||||||
|
|
||||||
### OTHER OPTIONAL VARIABLES
|
### OTHER OPTIONAL VARIABLES
|
||||||
|
|
||||||
|
## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries.
|
||||||
|
## If true, Kubespray does not include host nameservers in nameserverentries in dns_late stage. However, It uses the nameserver to make sure cluster installed safely in dns_early stage.
|
||||||
|
## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail.
|
||||||
|
# disable_host_nameservers: false
|
||||||
|
|
||||||
## Upstream dns servers
|
## Upstream dns servers
|
||||||
# upstream_dns_servers:
|
# upstream_dns_servers:
|
||||||
# - 8.8.8.8
|
# - 8.8.8.8
|
||||||
|
@ -130,3 +135,6 @@ ntp_servers:
|
||||||
- "1.pool.ntp.org iburst"
|
- "1.pool.ntp.org iburst"
|
||||||
- "2.pool.ntp.org iburst"
|
- "2.pool.ntp.org iburst"
|
||||||
- "3.pool.ntp.org iburst"
|
- "3.pool.ntp.org iburst"
|
||||||
|
|
||||||
|
## Used to control no_log attribute
|
||||||
|
unsafe_show_logs: false
|
||||||
|
|
|
@ -37,6 +37,9 @@
|
||||||
# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore
|
# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore
|
||||||
# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz"
|
# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz"
|
||||||
|
|
||||||
|
# [Optional] Cilium: If using Cilium network plugin
|
||||||
|
# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz"
|
||||||
|
|
||||||
# [Optional] Flannel: If using Falnnel network plugin
|
# [Optional] Flannel: If using Falnnel network plugin
|
||||||
# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}"
|
# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}"
|
||||||
|
|
||||||
|
@ -61,6 +64,10 @@
|
||||||
# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
|
# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
|
||||||
# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
|
# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
|
||||||
|
|
||||||
|
# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true
|
||||||
|
# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc"
|
||||||
|
# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1"
|
||||||
|
|
||||||
## CentOS/Redhat/AlmaLinux
|
## CentOS/Redhat/AlmaLinux
|
||||||
### For EL7, base and extras repo must be available, for EL8, baseos and appstream
|
### For EL7, base and extras repo must be available, for EL8, baseos and appstream
|
||||||
### By default we enable those repo automatically
|
### By default we enable those repo automatically
|
||||||
|
@ -82,8 +89,8 @@
|
||||||
# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
|
# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
|
||||||
# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
|
# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
|
||||||
### Containerd
|
### Containerd
|
||||||
# containerd_debian_repo_base_url: "{{ ubuntu_repo }}/containerd"
|
# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd"
|
||||||
# containerd_debian_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
|
# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg"
|
||||||
# containerd_debian_repo_repokey: 'YOURREPOKEY'
|
# containerd_debian_repo_repokey: 'YOURREPOKEY'
|
||||||
|
|
||||||
## Ubuntu
|
## Ubuntu
|
||||||
|
|
|
@ -7,13 +7,18 @@
|
||||||
# upcloud_csi_provisioner_image_tag: "v3.1.0"
|
# upcloud_csi_provisioner_image_tag: "v3.1.0"
|
||||||
# upcloud_csi_attacher_image_tag: "v3.4.0"
|
# upcloud_csi_attacher_image_tag: "v3.4.0"
|
||||||
# upcloud_csi_resizer_image_tag: "v1.4.0"
|
# upcloud_csi_resizer_image_tag: "v1.4.0"
|
||||||
# upcloud_csi_plugin_image_tag: "v0.2.1"
|
# upcloud_csi_plugin_image_tag: "v0.3.3"
|
||||||
# upcloud_csi_node_image_tag: "v2.5.0"
|
# upcloud_csi_node_image_tag: "v2.5.0"
|
||||||
# upcloud_tolerations: []
|
# upcloud_tolerations: []
|
||||||
## Storage class options
|
## Storage class options
|
||||||
# expand_persistent_volumes: true
|
|
||||||
# parameters:
|
|
||||||
# tier: maxiops # or hdd
|
|
||||||
# storage_classes:
|
# storage_classes:
|
||||||
# - name: standard
|
# - name: standard
|
||||||
# is_default: true
|
# is_default: true
|
||||||
|
# expand_persistent_volumes: true
|
||||||
|
# parameters:
|
||||||
|
# tier: maxiops
|
||||||
|
# - name: hdd
|
||||||
|
# is_default: false
|
||||||
|
# expand_persistent_volumes: true
|
||||||
|
# parameters:
|
||||||
|
# tier: hdd
|
|
@ -18,6 +18,8 @@ metrics_server_enabled: false
|
||||||
# metrics_server_kubelet_insecure_tls: true
|
# metrics_server_kubelet_insecure_tls: true
|
||||||
# metrics_server_metric_resolution: 15s
|
# metrics_server_metric_resolution: 15s
|
||||||
# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname"
|
# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname"
|
||||||
|
# metrics_server_host_network: false
|
||||||
|
# metrics_server_replicas: 1
|
||||||
|
|
||||||
# Rancher Local Path Provisioner
|
# Rancher Local Path Provisioner
|
||||||
local_path_provisioner_enabled: false
|
local_path_provisioner_enabled: false
|
||||||
|
@ -161,11 +163,12 @@ cert_manager_enabled: false
|
||||||
|
|
||||||
# MetalLB deployment
|
# MetalLB deployment
|
||||||
metallb_enabled: false
|
metallb_enabled: false
|
||||||
metallb_speaker_enabled: true
|
metallb_speaker_enabled: "{{ metallb_enabled }}"
|
||||||
# metallb_ip_range:
|
# metallb_ip_range:
|
||||||
# - "10.5.0.50-10.5.0.99"
|
# - "10.5.0.50-10.5.0.99"
|
||||||
# metallb_pool_name: "loadbalanced"
|
# metallb_pool_name: "loadbalanced"
|
||||||
# metallb_auto_assign: true
|
# metallb_auto_assign: true
|
||||||
|
# metallb_avoid_buggy_ips: false
|
||||||
# metallb_speaker_nodeselector:
|
# metallb_speaker_nodeselector:
|
||||||
# kubernetes.io/os: "linux"
|
# kubernetes.io/os: "linux"
|
||||||
# metallb_controller_nodeselector:
|
# metallb_controller_nodeselector:
|
||||||
|
@ -198,6 +201,7 @@ metallb_speaker_enabled: true
|
||||||
# - "10.5.1.50-10.5.1.99"
|
# - "10.5.1.50-10.5.1.99"
|
||||||
# protocol: "layer2"
|
# protocol: "layer2"
|
||||||
# auto_assign: false
|
# auto_assign: false
|
||||||
|
# avoid_buggy_ips: false
|
||||||
# metallb_protocol: "bgp"
|
# metallb_protocol: "bgp"
|
||||||
# metallb_peers:
|
# metallb_peers:
|
||||||
# - peer_address: 192.0.2.1
|
# - peer_address: 192.0.2.1
|
||||||
|
@ -207,9 +211,8 @@ metallb_speaker_enabled: true
|
||||||
# peer_asn: 64513
|
# peer_asn: 64513
|
||||||
# my_asn: 4200000000
|
# my_asn: 4200000000
|
||||||
|
|
||||||
|
|
||||||
argocd_enabled: false
|
argocd_enabled: false
|
||||||
# argocd_version: v2.4.7
|
# argocd_version: v2.4.16
|
||||||
# argocd_namespace: argocd
|
# argocd_namespace: argocd
|
||||||
# Default password:
|
# Default password:
|
||||||
# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli
|
# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli
|
||||||
|
|
|
@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
|
||||||
kube_api_anonymous_auth: true
|
kube_api_anonymous_auth: true
|
||||||
|
|
||||||
## Change this to use another Kubernetes version, e.g. a current beta release
|
## Change this to use another Kubernetes version, e.g. a current beta release
|
||||||
kube_version: v1.24.3
|
kube_version: v1.25.5
|
||||||
|
|
||||||
# Where the binaries will be downloaded.
|
# Where the binaries will be downloaded.
|
||||||
# Note: ensure that you've enough disk space (about 1G)
|
# Note: ensure that you've enough disk space (about 1G)
|
||||||
|
@ -125,7 +125,7 @@ kube_apiserver_port: 6443 # (https)
|
||||||
kube_proxy_mode: ipvs
|
kube_proxy_mode: ipvs
|
||||||
|
|
||||||
# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
|
# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
|
||||||
# must be set to true for MetalLB to work
|
# must be set to true for MetalLB, kube-vip(ARP enabled) to work
|
||||||
kube_proxy_strict_arp: false
|
kube_proxy_strict_arp: false
|
||||||
|
|
||||||
# A string slice of values which specify the addresses to use for NodePorts.
|
# A string slice of values which specify the addresses to use for NodePorts.
|
||||||
|
@ -160,6 +160,14 @@ kube_encrypt_secret_data: false
|
||||||
cluster_name: cluster.local
|
cluster_name: cluster.local
|
||||||
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
|
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
|
||||||
ndots: 2
|
ndots: 2
|
||||||
|
# dns_timeout: 2
|
||||||
|
# dns_attempts: 2
|
||||||
|
# Custom search domains to be added in addition to the default cluster search domains
|
||||||
|
# searchdomains:
|
||||||
|
# - svc.{{ cluster_name }}
|
||||||
|
# - default.svc.{{ cluster_name }}
|
||||||
|
# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
|
||||||
|
# remove_default_searchdomains: false
|
||||||
# Can be coredns, coredns_dual, manual or none
|
# Can be coredns, coredns_dual, manual or none
|
||||||
dns_mode: coredns
|
dns_mode: coredns
|
||||||
# Set manual server if using a custom cluster DNS server
|
# Set manual server if using a custom cluster DNS server
|
||||||
|
@ -185,11 +193,21 @@ nodelocaldns_secondary_skew_seconds: 5
|
||||||
# nameservers:
|
# nameservers:
|
||||||
# - 192.168.0.53
|
# - 192.168.0.53
|
||||||
# cache: 0
|
# cache: 0
|
||||||
|
# - zones:
|
||||||
|
# - mydomain.tld
|
||||||
|
# nameservers:
|
||||||
|
# - 10.233.0.3
|
||||||
|
# cache: 5
|
||||||
|
# rewrite:
|
||||||
|
# - name website.tld website.namespace.svc.cluster.local
|
||||||
# Enable k8s_external plugin for CoreDNS
|
# Enable k8s_external plugin for CoreDNS
|
||||||
enable_coredns_k8s_external: false
|
enable_coredns_k8s_external: false
|
||||||
coredns_k8s_external_zone: k8s_external.local
|
coredns_k8s_external_zone: k8s_external.local
|
||||||
# Enable endpoint_pod_names option for kubernetes plugin
|
# Enable endpoint_pod_names option for kubernetes plugin
|
||||||
enable_coredns_k8s_endpoint_pod_names: false
|
enable_coredns_k8s_endpoint_pod_names: false
|
||||||
|
# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config
|
||||||
|
# dns_upstream_forward_extra_opts:
|
||||||
|
# policy: sequential
|
||||||
|
|
||||||
# Can be docker_dns, host_resolvconf or none
|
# Can be docker_dns, host_resolvconf or none
|
||||||
resolvconf_mode: host_resolvconf
|
resolvconf_mode: host_resolvconf
|
||||||
|
@ -324,3 +342,9 @@ event_ttl_duration: "1h0m0s"
|
||||||
auto_renew_certificates: false
|
auto_renew_certificates: false
|
||||||
# First Monday of each month
|
# First Monday of each month
|
||||||
# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00"
|
# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00"
|
||||||
|
|
||||||
|
# kubeadm patches path
|
||||||
|
kubeadm_patches:
|
||||||
|
enabled: false
|
||||||
|
source_dir: "{{ inventory_dir }}/patches"
|
||||||
|
dest_dir: "{{ kube_config_dir }}/patches"
|
||||||
|
|
|
@ -60,7 +60,7 @@ calico_pool_blocksize: 26
|
||||||
# - x.x.x.x/24
|
# - x.x.x.x/24
|
||||||
# - y.y.y.y/32
|
# - y.y.y.y/32
|
||||||
|
|
||||||
# Adveritse Service LoadBalancer IPs
|
# Advertise Service LoadBalancer IPs
|
||||||
# calico_advertise_service_loadbalancer_ips:
|
# calico_advertise_service_loadbalancer_ips:
|
||||||
# - x.x.x.x/24
|
# - x.x.x.x/24
|
||||||
# - y.y.y.y/16
|
# - y.y.y.y/16
|
||||||
|
@ -99,7 +99,7 @@ calico_pool_blocksize: 26
|
||||||
# calico_vxlan_vni: 4096
|
# calico_vxlan_vni: 4096
|
||||||
# calico_vxlan_port: 4789
|
# calico_vxlan_port: 4789
|
||||||
|
|
||||||
# Cenable eBPF mode
|
# Enable eBPF mode
|
||||||
# calico_bpf_enabled: false
|
# calico_bpf_enabled: false
|
||||||
|
|
||||||
# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of:
|
# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of:
|
||||||
|
@ -109,6 +109,10 @@ calico_pool_blocksize: 26
|
||||||
# calico_ip_auto_method: "interface=eth.*"
|
# calico_ip_auto_method: "interface=eth.*"
|
||||||
# calico_ip6_auto_method: "interface=eth.*"
|
# calico_ip6_auto_method: "interface=eth.*"
|
||||||
|
|
||||||
|
# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the host’s interface for MTU auto-detection.
|
||||||
|
# see https://projectcalico.docs.tigera.io/reference/felix/configuration
|
||||||
|
# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
|
||||||
|
|
||||||
# Choose the iptables insert mode for Calico: "Insert" or "Append".
|
# Choose the iptables insert mode for Calico: "Insert" or "Append".
|
||||||
# calico_felix_chaininsertmode: Insert
|
# calico_felix_chaininsertmode: Insert
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
# cilium_version: "v1.11.7"
|
# cilium_version: "v1.12.1"
|
||||||
|
|
||||||
# Log-level
|
# Log-level
|
||||||
# cilium_debug: false
|
# cilium_debug: false
|
||||||
|
@ -118,6 +118,7 @@
|
||||||
# https://docs.cilium.io/en/stable/concepts/networking/masquerading/
|
# https://docs.cilium.io/en/stable/concepts/networking/masquerading/
|
||||||
# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded
|
# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded
|
||||||
# cilium_ip_masq_agent_enable: false
|
# cilium_ip_masq_agent_enable: false
|
||||||
|
|
||||||
### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded
|
### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded
|
||||||
# cilium_non_masquerade_cidrs:
|
# cilium_non_masquerade_cidrs:
|
||||||
# - 10.0.0.0/8
|
# - 10.0.0.0/8
|
||||||
|
|
|
@ -10,9 +10,9 @@
|
||||||
## single quote and escape backslashes
|
## single quote and escape backslashes
|
||||||
# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
|
# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
|
||||||
|
|
||||||
# You can choose what type of flannel backend to use: 'vxlan' or 'host-gw'
|
# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard'
|
||||||
# for experimental backend
|
|
||||||
# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md
|
# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md
|
||||||
# flannel_backend_type: "vxlan"
|
# flannel_backend_type: "vxlan"
|
||||||
# flannel_vxlan_vni: 1
|
# flannel_vxlan_vni: 1
|
||||||
# flannel_vxlan_port: 8472
|
# flannel_vxlan_port: 8472
|
||||||
|
# flannel_vxlan_direct_routing: false
|
||||||
|
|
|
@ -16,7 +16,7 @@
|
||||||
# Add External IP of service to the RIB so that it gets advertised to the BGP peers.
|
# Add External IP of service to the RIB so that it gets advertised to the BGP peers.
|
||||||
# kube_router_advertise_external_ip: false
|
# kube_router_advertise_external_ip: false
|
||||||
|
|
||||||
# Add LoadbBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers.
|
# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers.
|
||||||
# kube_router_advertise_loadbalancer_ip: false
|
# kube_router_advertise_loadbalancer_ip: false
|
||||||
|
|
||||||
# Adjust manifest of kube-router daemonset template with DSR needed changes
|
# Adjust manifest of kube-router daemonset template with DSR needed changes
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue