Compare commits
78 commits
Commit SHA1s in this comparison:
4661e7db01, ba1d3dcddc, e7f8d5a987, 26183c2523, 0f7b9363f9, b0b569615e, 65aa9213d4, 44d1f83ee9,
b19d109a12, 4e52da6a35, c1a686bf47, eb8dd77ab6, cd46286523, e12850be55, 5e4f3cabf1, df00b1d69d,
d74dcfd3b8, c1c720422f, bac71fa7cb, ac1aa4d591, c22915a08c, 0ea43289e2, 01e527abf1, 704a054064,
8c693e8739, 9ecbf75cb4, 591a51aa75, 76a1697cf1, 1216a0d52d, f4d3a4a5ad, 3c8ad073cd, 53b9388b82,
f26cc9f75b, 5563ed8084, a02e9206fe, e7cc686beb, 5d4fcbc5a1, ba348c9a00, b0f2471f0e, fbdc2b3e20,
557139a8cf, daea9f3d21, ac23d89a1a, 3292887cae, c7658c0256, 716a66e5d3, efd138e752, 40857b9859,
176df83e02, 60b405a7b7, d48a4bbc85, 10b08d8840, 189ce380bd, 5f06864582, 3ad248b007, 754a54adfc,
960844d87b, 6bde4e3fb3, 3725c80a71, d94f32c160, 6b184905e6, 782c3dc1c4, f6b806e971, dee0594d74,
f8b15a714c, d8ab76aa04, 8a5139e54c, 1727b3501f, 4ed05cf655, 8105cd7fbe, cf84a6bd3b, b80f612d29,
5e06ee6ea6, 4de5a070e1, b198cd23d0, 74e8f58c57, 803f89e82b, a652a3b3b5
1152 changed files with 38776 additions and 39225 deletions
@@ -18,13 +18,3 @@ skip_list:
  # While it can be useful to have these metadata available, they are also available in the existing documentation.
  # (Disabled in May 2019)
  - '701'

  # [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
  # Meta roles in Kubespray don't need proper names
  # (Disabled in June 2021)
  - 'role-name'

  # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
  # In Kubespray we use variables that use camelCase to match their k8s counterparts
  # (Disabled in June 2021)
  - 'var-naming'
.gitignore (vendored, 16 changes)

@@ -3,10 +3,7 @@
**/vagrant_ansible_inventory
*.iml
temp
contrib/offline/offline-files
contrib/offline/offline-files.tar.gz
.idea
.vscode
.tox
.cache
*.bak

@@ -14,19 +11,16 @@ contrib/offline/offline-files.tar.gz
*.tfstate.backup
.terraform/
contrib/terraform/aws/credentials.tfvars
.terraform.lock.hcl
/ssh-bastion.conf
**/*.sw[pon]
*~
vagrant/
plugins/mitogen
deploy.sh

# Ansible inventory
inventory/*
!inventory/local
!inventory/sample
!inventory/c12s-sample
inventory/*/artifacts/

# Byte-compiled / optimized / DLL files

@@ -105,13 +99,3 @@ target/
# virtualenv
venv/
ENV/

# molecule
roles/**/molecule/**/__pycache__/

# macOS
.DS_Store

# Temp location used by our scripts
scripts/tmp/
tmp.md
@@ -8,7 +8,7 @@ stages:
  - deploy-special

variables:
  KUBESPRAY_VERSION: v2.20.0
  KUBESPRAY_VERSION: v2.14.1
  FAILFASTCI_NAMESPACE: 'kargo-ci'
  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
  ANSIBLE_FORCE_COLOR: "true"

@@ -16,7 +16,6 @@ variables:
  TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
  CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
  CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
  CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
  GS_ACCESS_KEY_ID: $GS_KEY
  GS_SECRET_ACCESS_KEY: $GS_SECRET
  CONTAINER_ENGINE: docker

@@ -27,20 +26,16 @@ variables:
  ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
  IDEMPOT_CHECK: "false"
  RESET_CHECK: "false"
  REMOVE_NODE_CHECK: "false"
  UPGRADE_TEST: "false"
  MITOGEN_ENABLE: "false"
  ANSIBLE_LOG_LEVEL: "-vv"
  RECOVER_CONTROL_PLANE_TEST: "false"
  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
  TERRAFORM_VERSION: 1.0.8
  ANSIBLE_MAJOR_VERSION: "2.11"
  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"

before_script:
  - ./tests/scripts/rebase.sh
  - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
  - python -m pip uninstall -y ansible ansible-base ansible-core
  - python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
  - python -m pip install -r tests/requirements.txt
  - mkdir -p /.ssh

.job: &job

@@ -54,7 +49,6 @@ before_script:

.testcases: &testcases
  <<: *job
  retry: 1
  before_script:
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
    - ./tests/scripts/rebase.sh

@@ -81,4 +75,3 @@ include:
  - .gitlab-ci/terraform.yml
  - .gitlab-ci/packet.yml
  - .gitlab-ci/vagrant.yml
  - .gitlab-ci/molecule.yml
@@ -14,7 +14,7 @@ vagrant-validate:
  stage: unit-tests
  tags: [light]
  variables:
    VAGRANT_VERSION: 2.2.19
    VAGRANT_VERSION: 2.2.10
  script:
    - ./tests/scripts/vagrant-validate.sh
  except: ['triggers', 'master']

@@ -23,8 +23,9 @@ ansible-lint:
  extends: .job
  stage: unit-tests
  tags: [light]
  script:
    - ansible-lint -v
  # lint every yml/yaml file that looks like it contains Ansible plays
  script: |-
    grep -Rl '^- hosts: \|^ hosts: ' --include \*.yml --include \*.yaml . | xargs -P 4 -n 25 ansible-lint -v
  except: ['triggers', 'master']

syntax-check:

@@ -52,7 +53,6 @@ tox-inventory-builder:
    - ./tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
  script:
    - pip3 install tox

@@ -68,20 +68,6 @@ markdownlint:
  script:
    - markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md

check-readme-versions:
  stage: unit-tests
  tags: [light]
  image: python:3
  script:
    - tests/scripts/check_readme_versions.sh

check-typo:
  stage: unit-tests
  tags: [light]
  image: python:3
  script:
    - tests/scripts/check_typo.sh

ci-matrix:
  stage: unit-tests
  tags: [light]
@@ -1,86 +0,0 @@
---

.molecule:
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  services: []
  stage: deploy-part1
  before_script:
    - tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/molecule_run.sh
  after_script:
    - chronic ./tests/scripts/molecule_logs.sh
  artifacts:
    when: always
    paths:
      - molecule_logs/

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.molecule_periodic:
  only:
    variables:
      - $PERIODIC_CI_ENABLED
  allow_failure: true
  extends: .molecule

molecule_full:
  extends: .molecule_periodic

molecule_no_container_engines:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -e container-engine
  when: on_success

molecule_docker:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
  when: on_success

molecule_containerd:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/containerd
  when: on_success

molecule_cri-o:
  extends: .molecule
  stage: deploy-part2
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/cri-o
  when: on_success

# Stage 3 container engines don't get as much attention so allow them to fail
molecule_kata:
  extends: .molecule
  stage: deploy-part3
  allow_failure: true
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
  when: on_success

molecule_gvisor:
  extends: .molecule
  stage: deploy-part3
  allow_failure: true
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/gvisor
  when: on_success

molecule_youki:
  extends: .molecule
  stage: deploy-part3
  allow_failure: true
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/youki
  when: on_success
@@ -2,7 +2,6 @@
.packet:
  extends: .testcases
  variables:
    ANSIBLE_TIMEOUT: "120"
    CI_PLATFORM: packet
    SSH_USER: kubespray
  tags:

@@ -23,60 +22,27 @@
  allow_failure: true
  extends: .packet

# The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu18-calico-aio:
  stage: deploy-part1
  extends: .packet_pr
  when: on_success

# Future AIO job
packet_ubuntu20-calico-aio:
  stage: deploy-part1
  extends: .packet_pr
  when: on_success
  variables:
    RESET_CHECK: "true"

packet_ubuntu20-calico-aio-ansible-2_11:
  stage: deploy-part1
  extends: .packet_periodic
  when: on_success
  variables:
    ANSIBLE_MAJOR_VERSION: "2.11"
    RESET_CHECK: "true"

# ### PR JOBS PART2

packet_ubuntu18-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu20-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu20-calico-aio-hardening:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu18-calico-aio:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu22-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu22-calico-aio:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_centos7-flannel-addons-ha:
packet_centos7-flannel-containerd-addons-ha:
  extends: .packet_pr
  stage: deploy-part2
  when: on_success
  variables:
    MITOGEN_ENABLE: "true"

packet_almalinux8-crio:
packet_centos8-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: on_success

@@ -85,13 +51,10 @@ packet_ubuntu18-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: manual
  variables:
    MITOGEN_ENABLE: "true"

packet_fedora35-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: manual

packet_ubuntu16-canal-ha:
packet_ubuntu16-canal-kubeadm-ha:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

@@ -106,30 +69,27 @@ packet_ubuntu16-flannel-ha:
  extends: .packet_pr
  when: manual

packet_ubuntu16-kube-router-sep:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_ubuntu16-kube-router-svc-proxy:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_debian10-cilium-svc-proxy:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_debian10-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_debian10-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_debian11-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_debian11-docker:
packet_debian10-containerd:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success
  variables:
    MITOGEN_ENABLE: "true"

packet_centos7-calico-ha-once-localhost:
  stage: deploy-part2

@@ -141,32 +101,17 @@ packet_centos7-calico-ha-once-localhost:
  services:
    - docker:19.03.9-dind

packet_almalinux8-kube-ovn:
packet_centos8-kube-ovn:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_almalinux8-calico:
packet_centos8-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_rockylinux8-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_rockylinux9-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_almalinux8-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_fedora36-docker-weave:
packet_fedora32-weave:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

@@ -176,14 +121,14 @@ packet_opensuse-canal:
  extends: .packet_periodic
  when: on_success

packet_opensuse-docker-cilium:
packet_ubuntu18-ovn4nfv:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
  extends: .packet_periodic
  when: on_success

# ### MANUAL JOBS

packet_ubuntu16-docker-weave-sep:
packet_ubuntu16-weave-sep:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

@@ -193,18 +138,12 @@ packet_ubuntu18-cilium-sep:
  extends: .packet_pr
  when: manual

packet_ubuntu18-flannel-ha:
packet_ubuntu18-flannel-containerd-ha:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_ubuntu18-flannel-ha-once:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

# Calico HA eBPF
packet_almalinux8-calico-ha-ebpf:
packet_ubuntu18-flannel-containerd-ha-once:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

@@ -219,44 +158,34 @@ packet_centos7-calico-ha:
  extends: .packet_pr
  when: manual

packet_centos7-kube-router:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_centos7-multus-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_centos7-canal-ha:
packet_oracle7-canal-ha:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_fedora36-docker-calico:
packet_fedora33-calico:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success
  variables:
    RESET_CHECK: "true"

packet_fedora35-calico-selinux:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_fedora35-calico-swap-selinux:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
    MITOGEN_ENABLE: "true"

packet_amazon-linux-2-aio:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_almalinux8-calico-nodelocaldns-secondary:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_fedora36-kube-ovn:
packet_fedora32-kube-ovn-containerd:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

@@ -270,46 +199,23 @@ packet_centos7-weave-upgrade-ha:
  when: on_success
  variables:
    UPGRADE_TEST: basic
    MITOGEN_ENABLE: "false"

packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    UPGRADE_TEST: basic

# Calico HA Wireguard
packet_ubuntu20-calico-ha-wireguard:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_debian11-calico-upgrade:
packet_debian9-calico-upgrade:
  stage: deploy-part3
  extends: .packet_pr
  when: on_success
  variables:
    UPGRADE_TEST: graceful
    MITOGEN_ENABLE: "false"

packet_almalinux8-calico-remove-node:
  stage: deploy-part3
  extends: .packet_pr
  when: on_success
  variables:
    REMOVE_NODE_CHECK: "true"
    REMOVE_NODE_NAME: "instance-3"

packet_ubuntu20-calico-etcd-kubeadm:
  stage: deploy-part3
  extends: .packet_pr
  when: on_success

packet_debian11-calico-upgrade-once:
packet_debian9-calico-upgrade-once:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    UPGRADE_TEST: graceful
    MITOGEN_ENABLE: "false"

packet_ubuntu18-calico-ha-recover:
  stage: deploy-part3

@@ -317,7 +223,7 @@ packet_ubuntu18-calico-ha-recover:
  when: on_success
  variables:
    RECOVER_CONTROL_PLANE_TEST: "true"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"

packet_ubuntu18-calico-ha-recover-noquorum:
  stage: deploy-part3

@@ -325,4 +231,4 @@ packet_ubuntu18-calico-ha-recover-noquorum:
  when: on_success
  variables:
    RECOVER_CONTROL_PLANE_TEST: "true"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube-master[1:]"
@@ -11,6 +11,6 @@ shellcheck:
    - cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
    - shellcheck --version
  script:
    # Run shellcheck for all *.sh
    - find . -name '*.sh' -not -path './.git/*' | xargs shellcheck --severity error
    # Run shellcheck for all *.sh except contrib/
    - find . -name '*.sh' -not -path './contrib/*' -not -path './.git/*' | xargs shellcheck --severity error
  except: ['triggers', 'master']
@@ -12,13 +12,13 @@
    # Prepare inventory
    - cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars .
    - ln -s contrib/terraform/$PROVIDER/hosts
    - terraform -chdir="contrib/terraform/$PROVIDER" init
    - terraform init contrib/terraform/$PROVIDER
    # Copy SSH keypair
    - mkdir -p ~/.ssh
    - echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
    - chmod 400 ~/.ssh/id_rsa
    - echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
    - mkdir -p contrib/terraform/$PROVIDER/group_vars
    - mkdir -p group_vars
    # Random subnet to avoid routing conflicts
    - export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"

@@ -28,8 +28,8 @@
  tags: [light]
  only: ['master', /^pr-.*$/]
  script:
    - terraform -chdir="contrib/terraform/$PROVIDER" validate
    - terraform -chdir="contrib/terraform/$PROVIDER" fmt -check -diff
    - terraform validate -var-file=cluster.tfvars contrib/terraform/$PROVIDER
    - terraform fmt -check -diff contrib/terraform/$PROVIDER

.terraform_apply:
  extends: .terraform_install
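The two invocation styles in the hunk above correspond to different Terraform CLI generations: the `-chdir` global option exists only on Terraform 0.14 and later, while 0.12.x takes the working directory as a positional argument. A rough sketch of the difference, using `openstack` as one of the provider directories referenced here (adjust `$PROVIDER` to taste):

```ShellSession
# Terraform >= 0.14 (e.g. the 1.0.x TERRAFORM_VERSION above): run from the repo root
terraform -chdir="contrib/terraform/openstack" init
terraform -chdir="contrib/terraform/openstack" validate

# Terraform 0.12.x style: pass the directory as a positional argument
terraform init contrib/terraform/openstack
terraform validate -var-file=cluster.tfvars contrib/terraform/openstack
```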
@@ -56,48 +56,70 @@
tf-validate-openstack:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    TF_VERSION: 0.12.29
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-metal:
tf-validate-packet:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: metal
    TF_VERSION: 0.12.29
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-aws:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    TF_VERSION: 0.12.29
    PROVIDER: aws
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-exoscale:
tf-0.13.x-validate-openstack:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: exoscale

tf-validate-vsphere:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: vsphere
    TF_VERSION: 0.13.5
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-upcloud:
tf-0.13.x-validate-packet:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: upcloud
    TF_VERSION: 0.13.5
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME

tf-0.13.x-validate-aws:
  extends: .terraform_validate
  variables:
    TF_VERSION: 0.13.5
    PROVIDER: aws
    CLUSTER: $CI_COMMIT_REF_NAME

tf-0.14.x-validate-openstack:
  extends: .terraform_validate
  variables:
    TF_VERSION: 0.14.3
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME

tf-0.14.x-validate-packet:
  extends: .terraform_validate
  variables:
    TF_VERSION: 0.14.3
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME

tf-0.14.x-validate-aws:
  extends: .terraform_validate
  variables:
    TF_VERSION: 0.14.3
    PROVIDER: aws
    CLUSTER: $CI_COMMIT_REF_NAME

# tf-packet-ubuntu16-default:
#   extends: .terraform_apply
#   variables:
#     TF_VERSION: $TERRAFORM_VERSION
#     TF_VERSION: 0.12.29
#     PROVIDER: packet
#     CLUSTER: $CI_COMMIT_REF_NAME
#     TF_VAR_number_of_k8s_masters: "1"

@@ -111,7 +133,7 @@ tf-validate-upcloud:
# tf-packet-ubuntu18-default:
#   extends: .terraform_apply
#   variables:
#     TF_VERSION: $TERRAFORM_VERSION
#     TF_VERSION: 0.12.29
#     PROVIDER: packet
#     CLUSTER: $CI_COMMIT_REF_NAME
#     TF_VAR_number_of_k8s_masters: "1"

@@ -146,6 +168,10 @@ tf-validate-upcloud:
    OS_INTERFACE: public
    OS_IDENTITY_API_VERSION: "3"
    TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"
    # Since ELASTX is in Stockholm, Mitogen helps with latency
    MITOGEN_ENABLE: "false"
    # Mitogen doesn't support interpreter discovery yet
    ANSIBLE_PYTHON_INTERPRETER: "/usr/bin/python3"

tf-elastx_cleanup:
  stage: unit-tests

@@ -162,10 +188,9 @@ tf-elastx_ubuntu18-calico:
  extends: .terraform_apply
  stage: deploy-part3
  when: on_success
  allow_failure: true
  variables:
    <<: *elastx_variables
    TF_VERSION: $TERRAFORM_VERSION
    TF_VERSION: 0.12.29
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"

@@ -191,45 +216,44 @@ tf-elastx_ubuntu18-calico:
    TF_VAR_image: ubuntu-18.04-server-latest
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

# OVH voucher expired, commenting job until things are sorted out

# tf-ovh_cleanup:
#   stage: unit-tests
#   tags: [light]
#   image: python
#   environment: ovh
#   variables:
#     <<: *ovh_variables
#   before_script:
#     - pip install -r scripts/openstack-cleanup/requirements.txt
#   script:
#     - ./scripts/openstack-cleanup/main.py
tf-ovh_cleanup:
  stage: unit-tests
  tags: [light]
  image: python
  environment: ovh
  variables:
    <<: *ovh_variables
  before_script:
    - pip install -r scripts/openstack-cleanup/requirements.txt
  script:
    - ./scripts/openstack-cleanup/main.py

# tf-ovh_ubuntu18-calico:
#   extends: .terraform_apply
#   when: on_success
#   environment: ovh
#   variables:
#     <<: *ovh_variables
#     TF_VERSION: $TERRAFORM_VERSION
#     PROVIDER: openstack
#     CLUSTER: $CI_COMMIT_REF_NAME
#     ANSIBLE_TIMEOUT: "60"
#     SSH_USER: ubuntu
#     TF_VAR_number_of_k8s_masters: "0"
#     TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
#     TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
#     TF_VAR_number_of_etcd: "0"
#     TF_VAR_number_of_k8s_nodes: "0"
#     TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
#     TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
#     TF_VAR_number_of_bastions: "0"
#     TF_VAR_number_of_k8s_masters_no_etcd: "0"
#     TF_VAR_use_neutron: "0"
#     TF_VAR_floatingip_pool: "Ext-Net"
#     TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
#     TF_VAR_network_name: "Ext-Net"
#     TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
#     TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
#     TF_VAR_image: "Ubuntu 18.04"
#     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
tf-ovh_ubuntu18-calico:
  extends: .terraform_apply
  when: on_success
  environment: ovh
  variables:
    <<: *ovh_variables
    TF_VERSION: 0.12.29
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"
    SSH_USER: ubuntu
    TF_VAR_number_of_k8s_masters: "0"
    TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
    TF_VAR_number_of_etcd: "0"
    TF_VAR_number_of_k8s_nodes: "0"
    TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
    TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
    TF_VAR_number_of_bastions: "0"
    TF_VAR_number_of_k8s_masters_no_etcd: "0"
    TF_VAR_use_neutron: "0"
    TF_VAR_floatingip_pool: "Ext-Net"
    TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
    TF_VAR_network_name: "Ext-Net"
    TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
    TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
    TF_VAR_image: "Ubuntu 18.04"
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
@@ -1,5 +1,21 @@
---

molecule_tests:
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  services: []
  stage: deploy-part1
  before_script:
    - tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/molecule_run.sh

.vagrant:
  extends: .testcases
  variables:

@@ -15,19 +31,12 @@
  before_script:
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/testcases_run.sh
  after_script:
    - chronic ./tests/scripts/testcases_cleanup.sh
  allow_failure: true

vagrant_ubuntu18-calico-dual-stack:
  stage: deploy-part2
  extends: .vagrant
  when: on_success

vagrant_ubuntu18-flannel:
  stage: deploy-part2

@@ -43,25 +52,3 @@ vagrant_ubuntu20-flannel:
  stage: deploy-part2
  extends: .vagrant
  when: on_success
  allow_failure: false

vagrant_ubuntu16-kube-router-sep:
  stage: deploy-part2
  extends: .vagrant
  when: manual

# Service proxy test fails connectivity testing
vagrant_ubuntu16-kube-router-svc-proxy:
  stage: deploy-part2
  extends: .vagrant
  when: manual

vagrant_fedora35-kube-router:
  stage: deploy-part2
  extends: .vagrant
  when: on_success

vagrant_centos7-kube-router:
  stage: deploy-part2
  extends: .vagrant
  when: manual
@@ -1,3 +1,2 @@
---
MD013: false
MD029: false
@@ -1,48 +0,0 @@
---
repos:
  - repo: https://github.com/adrienverge/yamllint.git
    rev: v1.27.1
    hooks:
      - id: yamllint
        args: [--strict]

  - repo: https://github.com/markdownlint/markdownlint
    rev: v0.11.0
    hooks:
      - id: markdownlint
        args: [ -r, "~MD013,~MD029" ]
        exclude: "^.git"

  - repo: local
    hooks:
      - id: ansible-lint
        name: ansible-lint
        entry: ansible-lint -v
        language: python
        pass_filenames: false
        additional_dependencies:
          - .[community]

      - id: ansible-syntax-check
        name: ansible-syntax-check
        entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
        language: python
        files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"

      - id: tox-inventory-builder
        name: tox-inventory-builder
        entry: bash -c "cd contrib/inventory_builder && tox"
        language: python
        pass_filenames: false

      - id: check-readme-versions
        name: check-readme-versions
        entry: tests/scripts/check_readme_versions.sh
        language: script
        pass_filenames: false

      - id: ci-matrix
        name: ci-matrix
        entry: tests/scripts/md-table/test.sh
        language: script
        pass_filenames: false
@@ -6,22 +6,11 @@

It is recommended to use filter to manage the GitHub email notification, see [examples for setting filters to Kubernetes Github notifications](https://github.com/kubernetes/community/blob/master/communication/best-practices.md#examples-for-setting-filters-to-kubernetes-github-notifications)

To install development dependencies you can set up a python virtual env with the necessary dependencies:

```ShellSession
virtualenv venv
source venv/bin/activate
pip install -r tests/requirements.txt
```
To install development dependencies you can use `pip install -r tests/requirements.txt`

#### Linting

Kubespray uses [pre-commit](https://pre-commit.com) hook configuration to run several linters, please install this tool and use it to run validation tests before submitting a PR.

```ShellSession
pre-commit install
pre-commit run -a # To run pre-commit hook on all files in the repository, even if they were not modified
```
Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint`

#### Molecule

@@ -38,9 +27,5 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster
1. Submit an issue describing your proposed change to the repo in question.
2. The [repo owners](OWNERS) will respond to your issue promptly.
3. Fork the desired repo, develop and test your code changes.
4. Install [pre-commit](https://pre-commit.com) and install it in your development repo.
5. Addess any pre-commit validation failures.
6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
7. Submit a pull request.
8. Work with the reviewers on their suggestions.
9. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before final merger of your contribution.
4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
5. Submit a pull request.
Dockerfile (46 changes)

@@ -1,37 +1,25 @@
# Use imutable image tags rather than mutable tags (like ubuntu:20.04)
FROM ubuntu:focal-20220531
# Use imutable image tags rather than mutable tags (like ubuntu:18.04)
FROM ubuntu:bionic-20200807

ARG ARCH=amd64
ARG TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
ENV KUBE_VERSION=v1.19.9

RUN apt update -y \
    && apt install -y \
RUN mkdir /kubespray
WORKDIR /kubespray
RUN apt update -y && \
    apt install -y \
    libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
    ca-certificates curl gnupg2 software-properties-common python3-pip unzip rsync git \
    && rm -rf /var/lib/apt/lists/*
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
    && add-apt-repository \
    "deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \
    ca-certificates curl gnupg2 software-properties-common python3-pip rsync
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
    add-apt-repository \
    "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) \
    stable" \
    && apt update -y && apt-get install --no-install-recommends -y docker-ce \
    && rm -rf /var/lib/apt/lists/*
    && apt update -y && apt-get install docker-ce -y
COPY . .
RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt && update-alternatives --install /usr/bin/python python /usr/bin/python3 1

RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/amd64/kubectl \
    && chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl

# Some tools like yamllint need this
# Pip needs this as well at the moment to install ansible
# (and potentially other packages)
# See: https://github.com/pypa/pip/issues/10219
ENV LANG=C.UTF-8

WORKDIR /kubespray
COPY . .
RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \
    && /usr/bin/python3 -m pip install --no-cache-dir -r tests/requirements.txt \
    && python3 -m pip install --no-cache-dir -r requirements.txt \
    && update-alternatives --install /usr/bin/python python /usr/bin/python3 1

RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
    && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
    && chmod a+x kubectl \
    && mv kubectl /usr/local/bin/kubectl
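One side of the hunk above derives the kubectl version from the repository's default `kube_version` instead of a hard-coded `ENV KUBE_VERSION`. As an illustrative check (run from a kubespray checkout), the same sed expression can be used on its own to see which version the image would fetch:

```ShellSession
# Print the default kube_version that the Dockerfile's RUN step substitutes into the kubectl URL
sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml
```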
Makefile (4 changes)

@@ -1,7 +1,5 @@
mitogen:
    @echo Mitogen support is deprecated.
    @echo Please run the following command manually:
    @echo ansible-playbook -c local mitogen.yml -vv
    ansible-playbook -c local mitogen.yml -vv
clean:
    rm -rf dist/
    rm *.retry
@@ -4,23 +4,15 @@ aliases:
    - chadswen
    - mirwan
    - miouge1
    - woopstar
    - luckysb
    - floryut
    - oomichi
    - cristicalin
    - liupeng0518
    - yankay
  kubespray-reviewers:
    - holmsten
    - bozzo
    - eppo
    - oomichi
    - jayonlau
    - cristicalin
    - liupeng0518
    - yankay
  kubespray-emeritus_approvers:
    - riverzhang
    - atoms
    - ant31
    - woopstar
README.md (119 changes)

@@ -5,7 +5,7 @@
If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
You can get your invite [here](http://slack.k8s.io/)

- Can be deployed on **[AWS](docs/aws.md), GCE, [Azure](docs/azure.md), [OpenStack](docs/openstack.md), [vSphere](docs/vsphere.md), [Equinix Metal](docs/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- Can be deployed on **[AWS](docs/aws.md), GCE, [Azure](docs/azure.md), [OpenStack](docs/openstack.md), [vSphere](docs/vsphere.md), [Packet](docs/packet.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- **Highly available** cluster
- **Composable** (Choice of the network plugin for instance)
- Supports most popular **Linux distributions**

@@ -19,10 +19,10 @@ To deploy the cluster you can use :

#### Usage

Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
then run the following steps:

```ShellSession
# Install dependencies from ``requirements.txt``
sudo pip3 install -r requirements.txt

# Copy ``inventory/sample`` as ``inventory/mycluster``
cp -rfp inventory/sample inventory/mycluster

@@ -32,7 +32,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv

# Review and change parameters under ``inventory/mycluster/group_vars``
cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml

# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/,

@@ -48,24 +48,11 @@ As a consequence, `ansible-playbook` command will fail with:
ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
```

probably pointing on a task depending on a module present in requirements.txt.
probably pointing on a task depending on a module present in requirements.txt (i.e. "unseal vault").

One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of pip packages installation location, which can be found in the Location field of the output of `pip show [package]` before executing `ansible-playbook`.
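A rough sketch of that workaround, assuming a pip-installed `ansible` package (the actual location and playbook arguments depend on your setup):

```ShellSession
# Find where pip installed ansible and point Ansible at its bundled modules
ansible_location=$(pip show ansible | sed -n 's/^Location: //p')
export ANSIBLE_LIBRARY="${ansible_location}/ansible/modules"
export ANSIBLE_MODULE_UTILS="${ansible_location}/ansible/module_utils"
ansible-playbook -i inventory/mycluster/hosts.yaml --become cluster.yml
```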
A simple way to ensure you get all the correct version of Ansible is to use the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:

```ShellSession
git checkout v2.20.0
docker pull quay.io/kubespray/kubespray:v2.20.0
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
  --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
  quay.io/kubespray/kubespray:v2.20.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```

### Vagrant

For Vagrant we need to install python dependencies for provisioning tasks.

@@ -76,11 +63,10 @@ python -V && pip -V
```

If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>

Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
then run the following step:
Install the necessary requirements

```ShellSession
sudo pip install -r requirements.txt
vagrant up
```

@@ -107,85 +93,61 @@ vagrant up
- [AWS](docs/aws.md)
- [Azure](docs/azure.md)
- [vSphere](docs/vsphere.md)
- [Equinix Metal](docs/equinix-metal.md)
- [Packet Host](docs/packet.md)
- [Large deployments](docs/large-deployments.md)
- [Adding/replacing a node](docs/nodes.md)
- [Upgrades basics](docs/upgrades.md)
- [Air-Gap installation](docs/offline-environment.md)
- [NTP](docs/ntp.md)
- [Hardening](docs/hardening.md)
- [Mirror](docs/mirror.md)
- [Roadmap](docs/roadmap.md)

## Supported Linux Distributions

- **Flatcar Container Linux by Kinvolk**
- **Debian** Bullseye, Buster, Jessie, Stretch
- **Ubuntu** 16.04, 18.04, 20.04, 22.04
- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
- **Fedora** 35, 36
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
- **Debian** Buster, Jessie, Stretch, Wheezy
- **Ubuntu** 16.04, 18.04, 20.04
- **CentOS/RHEL** 7, 8 (experimental: see [centos 8 notes](docs/centos8.md))
- **Fedora** 32, 33
- **Fedora CoreOS** (experimental: see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed
- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
- **Alma Linux** [8, 9](docs/centos.md#centos-8)
- **Rocky Linux** [8, 9](docs/centos.md#centos-8)
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md))
- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md))
- **Oracle Linux** 7, 8 (experimental: [centos 8 notes](docs/centos8.md) apply)

Note: Upstart/SysV init based OS types are not supported.

## Supported Components

- Core
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.5
  - [etcd](https://github.com/etcd-io/etcd) v3.5.6
  - [docker](https://www.docker.com/) v20.10 (see note)
  - [containerd](https://containerd.io/) v1.6.14
  - [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.19.9
  - [etcd](https://github.com/coreos/etcd) v3.4.13
  - [docker](https://www.docker.com/) v19.03 (see note)
  - [containerd](https://containerd.io/) v1.3.9
  - [cri-o](http://cri-o.io/) v1.19 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
  - [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
  - [calico](https://github.com/projectcalico/calico) v3.24.5
  - [cni-plugins](https://github.com/containernetworking/plugins) v0.9.0
  - [calico](https://github.com/projectcalico/calico) v3.16.9
  - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
  - [cilium](https://github.com/cilium/cilium) v1.12.1
  - [flannel](https://github.com/flannel-io/flannel) v0.19.2
  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
  - [multus](https://github.com/intel/multus-cni) v3.8
  - [weave](https://github.com/weaveworks/weave) v2.8.1
  - [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.5
  - [cilium](https://github.com/cilium/cilium) v1.8.8
  - [flanneld](https://github.com/coreos/flannel) v0.13.0
  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.6.1
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.1.1
  - [multus](https://github.com/intel/multus-cni) v3.7.0
  - [ovn4nfv](https://github.com/opnfv/ovn4nfv-k8s-plugin) v1.1.0
  - [weave](https://github.com/weaveworks/weave) v2.7.0
- Application
  - [cert-manager](https://github.com/jetstack/cert-manager) v1.10.1
  - [coredns](https://github.com/coredns/coredns) v1.9.3
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.5.1
  - [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
  - [argocd](https://argoproj.github.io/) v2.4.16
  - [helm](https://helm.sh/) v3.9.4
  - [metallb](https://metallb.universe.tf/) v0.12.1
  - [registry](https://github.com/distribution/distribution) v2.8.1
- Storage Plugin
  - [ambassador](https://github.com/datawire/ambassador): v1.5
  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
  - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
  - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
  - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
  - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
  - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22
  - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
  - [cert-manager](https://github.com/jetstack/cert-manager) v0.16.1
  - [coredns](https://github.com/coredns/coredns) v1.7.0
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.41.2

## Container Runtime Notes

- The list of available docker version is 18.09, 19.03 and 20.10. The recommended docker version is 20.10. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)
Note: The list of available docker version is 18.09, 19.03 and 20.10. The recommended docker version is 19.03. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).

## Requirements

- **Minimum required version of Kubernetes is v1.23**
- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- **Minimum required version of Kubernetes is v1.18**
- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands, Ansible 2.10.x is not supported for now**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding**.
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
  in order to avoid any issue during deployment you should disable your firewall.
- If kubespray is ran from non-root user account, correct privilege escalation method

@@ -215,6 +177,8 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us

- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.

- [ovn4nfv](docs/ovn4nfv.md): [ovn4nfv-k8s-plugins](https://github.com/opnfv/ovn4nfv-k8s-plugin) is the network controller, OVS agent and CNI server to offer basic SFC and OVN overlay networking.

- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
  (Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).

@@ -235,9 +199,9 @@ See also [Network checker](docs/netcheck.md).

## Ingress Plugins

- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.
- [ambassador](docs/ambassador.md): the Ambassador Ingress Controller and API gateway.

- [metallb](docs/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.

## Community docs and resources

@@ -250,12 +214,11 @@ See also [Network checker](docs/netcheck.md).

- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
- [Kubean](https://github.com/kubean-io/kubean)

## CI Tests

[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/pipeline.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)

CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).
CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Packet](https://www.packet.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).

See the [test matrix](docs/test_cases.md) for details.
RELEASE.md (53 changes)

@@ -2,18 +2,17 @@

The Kubespray Project is released on an as-needed basis. The process is as follows:

1. An issue is proposing a new release with a changelog since the last release. Please see [a good sample issue](https://github.com/kubernetes-sigs/kubespray/issues/8325)
1. An issue is proposing a new release with a changelog since the last release
2. At least one of the [approvers](OWNERS_ALIASES) must approve this release
3. The `kube_version_min_required` variable is set to `n-1`
4. Remove hashes for [EOL versions](https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
5. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
6. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
7. An approver creates a release branch in the form `release-X.Y`
8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
9. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
10. The release issue is closed
11. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
12. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
4. Remove hashes for [EOL versions](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
5. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
6. An approver creates a release branch in the form `release-X.Y`
7. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
8. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
9. The release issue is closed
10. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
11. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`

## Major/minor releases and milestones

@@ -47,37 +46,3 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1
and *any* changes to other components, like etcd v4, or calico 1.2.3.
And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively.

## Release note creation

You can create a release note with:

```shell
export GITHUB_TOKEN=<your-github-token>
export ORG=kubernetes-sigs
export REPO=kubespray
release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --dependencies=false --output=/tmp/kubespray-release-note --required-author=""
```

If the release note file(/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label(`kind/feature`, etc.).
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note)

## Container image creation

The container image `quay.io/kubespray/kubespray:vX.Y.Z` can be created from Dockerfile of the kubespray root directory:

```shell
cd kubespray/
nerdctl build -t quay.io/kubespray/kubespray:vX.Y.Z .
nerdctl push quay.io/kubespray/kubespray:vX.Y.Z
```

The container image `quay.io/kubespray/vagrant:vX.Y.Z` can be created from build.sh of test-infra/vagrant-docker/:

```shell
cd kubespray/test-infra/vagrant-docker/
./build vX.Y.Z
```

Please note that the above operation requires the permission to push container images into quay.io/kubespray/.
If you don't have the permission, please ask it on the #kubespray-dev channel.
@@ -9,7 +9,5 @@
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
atoms
mattymo
floryut
oomichi
cristicalin
Vagrantfile (vendored, 52 changes)

@@ -26,12 +26,9 @@ SUPPORTED_OS = {
  "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
  "centos8" => {box: "centos/8", user: "vagrant"},
  "centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
  "almalinux8" => {box: "almalinux/8", user: "vagrant"},
  "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
  "rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
  "fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
  "fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
  "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
  "fedora32" => {box: "fedora/32-cloud-base", user: "vagrant"},
  "fedora33" => {box: "fedora/33-cloud-base", user: "vagrant"},
  "opensuse" => {box: "bento/opensuse-leap-15.2", user: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
  "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
  "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},

@@ -52,13 +49,12 @@ $vm_cpus ||= 2
$shared_folders ||= {}
$forwarded_ports ||= {}
$subnet ||= "172.18.8"
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
$os ||= "ubuntu1804"
$network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
$multi_networking ||= "False"
$multi_networking ||= false
$download_run_once ||= "True"
$download_force_cache ||= "False"
$download_force_cache ||= "True"
# The first three nodes are etcd servers
$etcd_instances ||= $num_instances
# The first two nodes are kube masters

@@ -71,12 +67,9 @@ $kube_node_instances_with_disks_size ||= "20G"
$kube_node_instances_with_disks_number ||= 2
$override_disk_size ||= false
$disk_size ||= "20GB"
$local_path_provisioner_enabled ||= "False"
$local_path_provisioner_enabled ||= false
$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/"
$libvirt_nested ||= false
# boolean or string (e.g. "-vvv")
$ansible_verbosity ||= false
$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || ""

$playbook ||= "cluster.yml"

@@ -92,9 +85,9 @@ $inventory = File.absolute_path($inventory, File.dirname(__FILE__))
if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
  $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible")
  FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
  $vagrant_inventory = File.join($vagrant_ansible,"inventory")
  FileUtils.rm_f($vagrant_inventory)
  FileUtils.ln_s($inventory, $vagrant_inventory)
  if ! File.exist?(File.join($vagrant_ansible,"inventory"))
    FileUtils.ln_s($inventory, File.join($vagrant_ansible,"inventory"))
  end
end

if Vagrant.has_plugin?("vagrant-proxyconf")

@@ -173,7 +166,7 @@ Vagrant.configure("2") do |config|
  # always make /dev/sd{a/b/c} so that CI can ensure that
  # virtualbox and libvirt will have the same devices to use for OSDs
  (1..$kube_node_instances_with_disks_number).each do |d|
    lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi"
    lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "ide"
  end
end
end

@@ -201,22 +194,11 @@ Vagrant.configure("2") do |config|
end

ip = "#{$subnet}.#{i+100}"
node.vm.network :private_network, ip: ip,
  :libvirt__guest_ipv6 => 'yes',
  :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
  :libvirt__ipv6_prefix => "64",
  :libvirt__forward_mode => "none",
  :libvirt__dhcp_enabled => false
node.vm.network :private_network, ip: ip

# Disable swap for each vm
node.vm.provision "shell", inline: "swapoff -a"
|
||||
|
||||
# ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
|
||||
if ["ubuntu1804", "ubuntu2004"].include? $os
|
||||
node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
|
||||
node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
|
||||
end
|
||||
|
||||
# Disable firewalld on oraclelinux/redhat vms
|
||||
if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
|
||||
node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
|
||||
|
@ -244,11 +226,9 @@ Vagrant.configure("2") do |config|
|
|||
}
|
||||
|
||||
# Only execute the Ansible provisioner once, when all the machines are up and ready.
|
||||
# And limit the action to gathering facts, the full playbook is going to be ran by testcases_run.sh
|
||||
if i == $num_instances
|
||||
node.vm.provision "ansible" do |ansible|
|
||||
ansible.playbook = $playbook
|
||||
ansible.verbose = $ansible_verbosity
|
||||
$ansible_inventory_path = File.join( $inventory, "hosts.ini")
|
||||
if File.exist?($ansible_inventory_path)
|
||||
ansible.inventory_path = $ansible_inventory_path
|
||||
|
@ -258,14 +238,12 @@ Vagrant.configure("2") do |config|
|
|||
ansible.host_key_checking = false
|
||||
ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
|
||||
ansible.host_vars = host_vars
|
||||
if $ansible_tags != ""
|
||||
ansible.tags = [$ansible_tags]
|
||||
end
|
||||
#ansible.tags = ['download']
|
||||
ansible.groups = {
|
||||
"etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
|
||||
"kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
|
||||
"kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
|
||||
"k8s_cluster:children" => ["kube_control_plane", "kube_node"],
|
||||
"kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
|
||||
"kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
|
||||
"k8s-cluster:children" => ["kube-master", "kube-node"],
|
||||
}
|
||||
end
|
||||
end
|
||||
|
|
|
@ -1,8 +1,9 @@
|
|||
[ssh_connection]
|
||||
pipelining=True
|
||||
ansible_ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
||||
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
||||
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
||||
[defaults]
|
||||
strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
|
||||
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
|
||||
force_valid_group_names = ignore
|
||||
|
||||
|
@ -10,11 +11,11 @@ host_key_checking=False
|
|||
gathering = smart
|
||||
fact_caching = jsonfile
|
||||
fact_caching_connection = /tmp
|
||||
fact_caching_timeout = 86400
|
||||
fact_caching_timeout = 7200
|
||||
stdout_callback = default
|
||||
display_skipped_hosts = no
|
||||
library = ./library
|
||||
callbacks_enabled = profile_tasks,ara_default
|
||||
callback_whitelist = profile_tasks
|
||||
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
|
||||
deprecation_warnings=False
|
||||
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg
|
||||
|
|
|
@ -3,31 +3,15 @@
|
|||
gather_facts: false
|
||||
become: no
|
||||
vars:
|
||||
minimal_ansible_version: 2.11.0
|
||||
maximal_ansible_version: 2.14.0
|
||||
minimal_ansible_version: 2.9.0
|
||||
maximal_ansible_version: 2.10.0
|
||||
ansible_connection: local
|
||||
tags: always
|
||||
tasks:
|
||||
- name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
|
||||
assert:
|
||||
msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }} exclusive"
|
||||
msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }}"
|
||||
that:
|
||||
- ansible_version.string is version(minimal_ansible_version, ">=")
|
||||
- ansible_version.string is version(maximal_ansible_version, "<")
|
||||
tags:
|
||||
- check
|
||||
|
||||
- name: "Check that python netaddr is installed"
|
||||
assert:
|
||||
msg: "Python netaddr is not present"
|
||||
that: "'127.0.0.1' | ipaddr"
|
||||
tags:
|
||||
- check
|
||||
|
||||
# CentOS 7 provides too old jinja version
|
||||
- name: "Check that jinja is not too old (install via pip)"
|
||||
assert:
|
||||
msg: "Your Jinja version is too old, install via pip"
|
||||
that: "{% set test %}It works{% endset %}{{ test == 'It works' }}"
|
||||
tags:
|
||||
- check
|
||||
|
|
46
cluster.yml
46
cluster.yml
|
@ -2,9 +2,6 @@
|
|||
- name: Check ansible version
|
||||
import_playbook: ansible_version.yml
|
||||
|
||||
- name: Ensure compatibility with old groups
|
||||
import_playbook: legacy_groups.yml
|
||||
|
||||
- hosts: bastion[0]
|
||||
gather_facts: False
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
|
@ -12,7 +9,7 @@
|
|||
- { role: kubespray-defaults }
|
||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
|
||||
|
||||
- hosts: k8s_cluster:etcd
|
||||
- hosts: k8s-cluster:etcd
|
||||
strategy: linear
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
gather_facts: false
|
||||
|
@ -25,17 +22,17 @@
|
|||
tags: always
|
||||
import_playbook: facts.yml
|
||||
|
||||
- hosts: k8s_cluster:etcd
|
||||
- hosts: k8s-cluster:etcd
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes/preinstall, tags: preinstall }
|
||||
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
|
||||
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
|
||||
- { role: download, tags: download, when: "not skip_downloads" }
|
||||
|
||||
- hosts: etcd:kube_control_plane
|
||||
- hosts: etcd
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
|
@ -46,9 +43,9 @@
|
|||
vars:
|
||||
etcd_cluster_setup: true
|
||||
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
|
||||
when: etcd_deployment_type != "kubeadm"
|
||||
when: not etcd_kubeadm_enabled| default(false)
|
||||
|
||||
- hosts: k8s_cluster
|
||||
- hosts: k8s-cluster
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
|
@ -59,12 +56,9 @@
|
|||
vars:
|
||||
etcd_cluster_setup: false
|
||||
etcd_events_cluster_setup: false
|
||||
when:
|
||||
- etcd_deployment_type != "kubeadm"
|
||||
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
|
||||
- kube_network_plugin != "calico" or calico_datastore == "etcd"
|
||||
when: not etcd_kubeadm_enabled| default(false)
|
||||
|
||||
- hosts: k8s_cluster
|
||||
- hosts: k8s-cluster
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
|
@ -72,27 +66,27 @@
|
|||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes/node, tags: node }
|
||||
|
||||
- hosts: kube_control_plane
|
||||
- hosts: kube-master
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes/control-plane, tags: master }
|
||||
- { role: kubernetes/master, tags: master }
|
||||
- { role: kubernetes/client, tags: client }
|
||||
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
|
||||
|
||||
- hosts: k8s_cluster
|
||||
- hosts: k8s-cluster
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes/kubeadm, tags: kubeadm}
|
||||
- { role: kubernetes/node-label, tags: node-label }
|
||||
- { role: network_plugin, tags: network }
|
||||
- { role: kubernetes/node-label, tags: node-label }
|
||||
|
||||
- hosts: calico_rr
|
||||
- hosts: calico-rr
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
|
@ -100,7 +94,7 @@
|
|||
- { role: kubespray-defaults }
|
||||
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
|
||||
|
||||
- hosts: kube_control_plane[0]
|
||||
- hosts: kube-master[0]
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
|
@ -108,7 +102,7 @@
|
|||
- { role: kubespray-defaults }
|
||||
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
|
||||
|
||||
- hosts: kube_control_plane
|
||||
- hosts: kube-master
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
|
@ -119,10 +113,16 @@
|
|||
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
|
||||
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
|
||||
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
|
||||
|
||||
- hosts: kube-master
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes-apps, tags: apps }
|
||||
|
||||
- name: Apply resolv.conf changes now that cluster DNS is up
|
||||
hosts: k8s_cluster
|
||||
- hosts: k8s-cluster
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
|
|
|
@ -35,7 +35,7 @@ class SearchEC2Tags(object):
|
|||
hosts['_meta'] = { 'hostvars': {} }
|
||||
|
||||
##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
|
||||
for group in ["kube_control_plane", "kube_node", "etcd"]:
|
||||
for group in ["kube-master", "kube-node", "etcd"]:
|
||||
hosts[group] = []
|
||||
tag_key = "kubespray-role"
|
||||
tag_value = ["*"+group+"*"]
|
||||
|
@ -69,8 +69,8 @@ class SearchEC2Tags(object):
|
|||
|
||||
hosts[group].append(dns_name)
|
||||
hosts['_meta']['hostvars'][dns_name] = ansible_host
|
||||
|
||||
hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']}
|
||||
|
||||
hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
|
||||
print(json.dumps(hosts, sort_keys=True, indent=2))
|
||||
|
||||
SearchEC2Tags()
|
||||
|
|
|
@ -47,10 +47,6 @@ If you need to delete all resources from a resource group, simply call:
|
|||
|
||||
**WARNING** this really deletes everything from your resource group, including everything that was later created by you!
|
||||
|
||||
## Installing Ansible and the dependencies
|
||||
|
||||
Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
|
||||
|
||||
## Generating an inventory for kubespray
|
||||
|
||||
After you have applied the templates, you can generate an inventory with this call:
|
||||
|
@ -63,5 +59,6 @@ It will create the file ./inventory which can then be used with kubespray, e.g.:
|
|||
|
||||
```shell
|
||||
cd kubespray-root-dir
|
||||
sudo pip3 install -r requirements.txt
|
||||
ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
|
||||
```
|
||||
|
|
|
@ -12,4 +12,3 @@
|
|||
template:
|
||||
src: inventory.j2
|
||||
dest: "{{ playbook_dir }}/inventory"
|
||||
mode: 0644
|
||||
|
|
|
@ -7,9 +7,9 @@
|
|||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
[kube_control_plane]
|
||||
[kube-master]
|
||||
{% for vm in vm_list %}
|
||||
{% if 'kube_control_plane' in vm.tags.roles %}
|
||||
{% if 'kube-master' in vm.tags.roles %}
|
||||
{{ vm.name }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
@ -21,13 +21,13 @@
|
|||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
[kube_node]
|
||||
[kube-node]
|
||||
{% for vm in vm_list %}
|
||||
{% if 'kube_node' in vm.tags.roles %}
|
||||
{% if 'kube-node' in vm.tags.roles %}
|
||||
{{ vm.name }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
[k8s_cluster:children]
|
||||
kube_node
|
||||
kube_control_plane
|
||||
[k8s-cluster:children]
|
||||
kube-node
|
||||
kube-master
|
||||
|
|
|
@ -22,10 +22,8 @@
|
|||
template:
|
||||
src: inventory.j2
|
||||
dest: "{{ playbook_dir }}/inventory"
|
||||
mode: 0644
|
||||
|
||||
- name: Generate Load Balancer variables
|
||||
template:
|
||||
src: loadbalancer_vars.j2
|
||||
dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
|
||||
mode: 0644
|
||||
|
|
|
@ -7,9 +7,9 @@
|
|||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
[kube_control_plane]
|
||||
[kube-master]
|
||||
{% for vm in vm_roles_list %}
|
||||
{% if 'kube_control_plane' in vm.tags.roles %}
|
||||
{% if 'kube-master' in vm.tags.roles %}
|
||||
{{ vm.name }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
@ -21,14 +21,14 @@
|
|||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
[kube_node]
|
||||
[kube-node]
|
||||
{% for vm in vm_roles_list %}
|
||||
{% if 'kube_node' in vm.tags.roles %}
|
||||
{% if 'kube-node' in vm.tags.roles %}
|
||||
{{ vm.name }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
[k8s_cluster:children]
|
||||
kube_node
|
||||
kube_control_plane
|
||||
[k8s-cluster:children]
|
||||
kube-node
|
||||
kube-master
|
||||
|
||||
|
|
|
@ -8,13 +8,11 @@
|
|||
path: "{{ base_dir }}"
|
||||
state: directory
|
||||
recurse: true
|
||||
mode: 0755
|
||||
|
||||
- name: Store json files in base_dir
|
||||
template:
|
||||
src: "{{ item }}"
|
||||
dest: "{{ base_dir }}/{{ item }}"
|
||||
mode: 0644
|
||||
with_items:
|
||||
- network.json
|
||||
- storage.json
|
||||
|
|
|
@ -144,7 +144,7 @@
|
|||
"[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
|
||||
],
|
||||
"tags": {
|
||||
"roles": "kube_control_plane,etcd"
|
||||
"roles": "kube-master,etcd"
|
||||
},
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"properties": {
|
||||
|
|
|
@ -61,7 +61,7 @@
|
|||
"[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
|
||||
],
|
||||
"tags": {
|
||||
"roles": "kube_node"
|
||||
"roles": "kube-node"
|
||||
},
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"properties": {
|
||||
|
@ -112,4 +112,4 @@
|
|||
} {% if not loop.last %},{% endif %}
|
||||
{% endfor %}
|
||||
]
|
||||
}
|
||||
}
|
|
@ -35,7 +35,6 @@
|
|||
path-exclude=/usr/share/doc/*
|
||||
path-include=/usr/share/doc/*/copyright
|
||||
dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
|
||||
mode: 0644
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
|
||||
|
@ -64,7 +63,6 @@
|
|||
copy:
|
||||
content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
|
||||
dest: "/etc/sudoers.d/{{ distro_user }}"
|
||||
mode: 0640
|
||||
|
||||
- name: Add my pubkey to "{{ distro_user }}" user authorized keys
|
||||
authorized_key:
|
||||
|
|
|
@ -17,7 +17,7 @@ pass_or_fail() {
|
|||
test_distro() {
|
||||
local distro=${1:?};shift
|
||||
local extra="${*:-}"
|
||||
local prefix="${distro[${extra}]}"
|
||||
local prefix="$distro[${extra}]}"
|
||||
ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
|
||||
pass_or_fail "$prefix: dind-nodes" || return 1
|
||||
(cd ../..
|
||||
|
@ -46,7 +46,7 @@ test_distro() {
|
|||
pass_or_fail "$prefix: netcheck" || return 1
|
||||
}
|
||||
|
||||
NODES=($(egrep ^kube_node hosts))
|
||||
NODES=($(egrep ^kube-node hosts))
|
||||
NETCHECKER_HOST=localhost
|
||||
|
||||
: ${OUTPUT_DIR:=./out}
|
||||
|
@ -71,15 +71,15 @@ for spec in ${SPECS}; do
|
|||
echo "Loading file=${spec} ..."
|
||||
. ${spec} || continue
|
||||
: ${DISTROS:?} || continue
|
||||
echo "DISTROS:" "${DISTROS[@]}"
|
||||
echo "DISTROS=${DISTROS[@]}"
|
||||
echo "EXTRAS->"
|
||||
printf " %s\n" "${EXTRAS[@]}"
|
||||
let n=1
|
||||
for distro in "${DISTROS[@]}"; do
|
||||
for distro in ${DISTROS[@]}; do
|
||||
for extra in "${EXTRAS[@]:-NULL}"; do
|
||||
# Magic value to let this for run once:
|
||||
[[ ${extra} == NULL ]] && unset extra
|
||||
docker rm -f "${NODES[@]}"
|
||||
docker rm -f ${NODES[@]}
|
||||
printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
|
||||
{
|
||||
info "${distro}[${extra}] START: file_out=${file_out}"
|
||||
|
|
|
@ -44,11 +44,11 @@ import re
|
|||
import subprocess
|
||||
import sys
|
||||
|
||||
ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster',
|
||||
'calico_rr']
|
||||
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
|
||||
'calico-rr']
|
||||
PROTECTED_NAMES = ROLES
|
||||
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
|
||||
'load', 'add']
|
||||
'load']
|
||||
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
|
||||
'0': False, 'no': False, 'false': False, 'off': False}
|
||||
yaml = YAML()
|
||||
|
@ -63,9 +63,7 @@ def get_var_as_bool(name, default):
|
|||
|
||||
|
||||
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
|
||||
# Remove the reference of KUBE_MASTERS after some deprecation cycles.
|
||||
KUBE_CONTROL_HOSTS = int(os.environ.get("KUBE_CONTROL_HOSTS",
|
||||
os.environ.get("KUBE_MASTERS", 2)))
|
||||
KUBE_MASTERS = int(os.environ.get("KUBE_MASTERS", 2))
|
||||
# Reconfigures cluster distribution at scale
|
||||
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
|
||||
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200))
|
||||
|
@ -82,54 +80,32 @@ class KubesprayInventory(object):
|
|||
def __init__(self, changed_hosts=None, config_file=None):
|
||||
self.config_file = config_file
|
||||
self.yaml_config = {}
|
||||
loadPreviousConfig = False
|
||||
printHostnames = False
|
||||
# See whether there are any commands to process
|
||||
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
|
||||
if changed_hosts[0] == "add":
|
||||
loadPreviousConfig = True
|
||||
changed_hosts = changed_hosts[1:]
|
||||
elif changed_hosts[0] == "print_hostnames":
|
||||
loadPreviousConfig = True
|
||||
printHostnames = True
|
||||
else:
|
||||
self.parse_command(changed_hosts[0], changed_hosts[1:])
|
||||
sys.exit(0)
|
||||
|
||||
# If the user wants to remove a node, we need to load the config anyway
|
||||
if changed_hosts and changed_hosts[0][0] == "-":
|
||||
loadPreviousConfig = True
|
||||
|
||||
if self.config_file and loadPreviousConfig: # Load previous YAML file
|
||||
if self.config_file:
|
||||
try:
|
||||
self.hosts_file = open(config_file, 'r')
|
||||
self.yaml_config = yaml.load(self.hosts_file)
|
||||
except OSError as e:
|
||||
# I am assuming we are catching "cannot open file" exceptions
|
||||
print(e)
|
||||
sys.exit(1)
|
||||
self.yaml_config = yaml.load_all(self.hosts_file)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
if printHostnames:
|
||||
self.print_hostnames()
|
||||
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
|
||||
self.parse_command(changed_hosts[0], changed_hosts[1:])
|
||||
sys.exit(0)
|
||||
|
||||
self.ensure_required_groups(ROLES)
|
||||
|
||||
if changed_hosts:
|
||||
changed_hosts = self.range2ips(changed_hosts)
|
||||
self.hosts = self.build_hostnames(changed_hosts,
|
||||
loadPreviousConfig)
|
||||
self.hosts = self.build_hostnames(changed_hosts)
|
||||
self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
|
||||
self.set_all(self.hosts)
|
||||
self.set_k8s_cluster()
|
||||
etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
|
||||
self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
|
||||
if len(self.hosts) >= SCALE_THRESHOLD:
|
||||
self.set_kube_control_plane(list(self.hosts.keys())[
|
||||
etcd_hosts_count:(etcd_hosts_count + KUBE_CONTROL_HOSTS)])
|
||||
self.set_kube_master(list(self.hosts.keys())[
|
||||
etcd_hosts_count:(etcd_hosts_count + KUBE_MASTERS)])
|
||||
else:
|
||||
self.set_kube_control_plane(
|
||||
list(self.hosts.keys())[:KUBE_CONTROL_HOSTS])
|
||||
self.set_kube_master(list(self.hosts.keys())[:KUBE_MASTERS])
|
||||
self.set_kube_node(self.hosts.keys())
|
||||
if len(self.hosts) >= SCALE_THRESHOLD:
|
||||
self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])
|
||||
|
@ -179,29 +155,17 @@ class KubesprayInventory(object):
|
|||
except IndexError:
|
||||
raise ValueError("Host name must end in an integer")
|
||||
|
||||
# Keeps already specified hosts,
|
||||
# and adds or removes the hosts provided as an argument
|
||||
def build_hostnames(self, changed_hosts, loadPreviousConfig=False):
|
||||
def build_hostnames(self, changed_hosts):
|
||||
existing_hosts = OrderedDict()
|
||||
highest_host_id = 0
|
||||
# Load already existing hosts from the YAML
|
||||
if loadPreviousConfig:
|
||||
try:
|
||||
for host in self.yaml_config['all']['hosts']:
|
||||
# Read configuration of an existing host
|
||||
hostConfig = self.yaml_config['all']['hosts'][host]
|
||||
existing_hosts[host] = hostConfig
|
||||
# If the existing host seems
|
||||
# to have been created automatically, detect its ID
|
||||
if host.startswith(HOST_PREFIX):
|
||||
host_id = self.get_host_id(host)
|
||||
if host_id > highest_host_id:
|
||||
highest_host_id = host_id
|
||||
except Exception as e:
|
||||
# I am assuming we are catching automatically
|
||||
# created hosts without IDs
|
||||
print(e)
|
||||
sys.exit(1)
|
||||
try:
|
||||
for host in self.yaml_config['all']['hosts']:
|
||||
existing_hosts[host] = self.yaml_config['all']['hosts'][host]
|
||||
host_id = self.get_host_id(host)
|
||||
if host_id > highest_host_id:
|
||||
highest_host_id = host_id
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# FIXME(mattymo): Fix condition where delete then add reuses highest id
|
||||
next_host_id = highest_host_id + 1
|
||||
|
@ -209,7 +173,6 @@ class KubesprayInventory(object):
|
|||
|
||||
all_hosts = existing_hosts.copy()
|
||||
for host in changed_hosts:
|
||||
# Delete the host from config the hostname/IP has a "-" prefix
|
||||
if host[0] == "-":
|
||||
realhost = host[1:]
|
||||
if self.exists_hostname(all_hosts, realhost):
|
||||
|
@ -218,8 +181,6 @@ class KubesprayInventory(object):
|
|||
elif self.exists_ip(all_hosts, realhost):
|
||||
self.debug("Marked {0} for deletion.".format(realhost))
|
||||
self.delete_host_by_ip(all_hosts, realhost)
|
||||
# Host/Argument starts with a digit,
|
||||
# then we assume its an IP address
|
||||
elif host[0].isdigit():
|
||||
if ',' in host:
|
||||
ip, access_ip = host.split(',')
|
||||
|
@ -239,15 +200,11 @@ class KubesprayInventory(object):
|
|||
next_host = subprocess.check_output(cmd, shell=True)
|
||||
next_host = next_host.strip().decode('ascii')
|
||||
else:
|
||||
# Generates a hostname because we have only an IP address
|
||||
next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
|
||||
next_host_id += 1
|
||||
# Uses automatically generated node name
|
||||
# in case we dont provide it.
|
||||
all_hosts[next_host] = {'ansible_host': access_ip,
|
||||
'ip': ip,
|
||||
'access_ip': access_ip}
|
||||
# Host/Argument starts with a letter, then we assume its a hostname
|
||||
elif host[0].isalpha():
|
||||
if ',' in host:
|
||||
try:
|
||||
|
@ -266,7 +223,6 @@ class KubesprayInventory(object):
|
|||
'access_ip': access_ip}
|
||||
return all_hosts
|
||||
|
||||
# Expand IP ranges into individual addresses
|
||||
def range2ips(self, hosts):
|
||||
reworked_hosts = []
|
||||
|
||||
|
@ -310,7 +266,7 @@ class KubesprayInventory(object):
|
|||
|
||||
def purge_invalid_hosts(self, hostnames, protected_names=[]):
|
||||
for role in self.yaml_config['all']['children']:
|
||||
if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']: # noqa
|
||||
if role != 'k8s-cluster' and self.yaml_config['all']['children'][role]['hosts']: # noqa
|
||||
all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy() # noqa
|
||||
for host in all_hosts.keys():
|
||||
if host not in hostnames and host not in protected_names:
|
||||
|
@ -331,54 +287,52 @@ class KubesprayInventory(object):
|
|||
if self.yaml_config['all']['hosts'] is None:
|
||||
self.yaml_config['all']['hosts'] = {host: None}
|
||||
self.yaml_config['all']['hosts'][host] = opts
|
||||
elif group != 'k8s_cluster:children':
|
||||
elif group != 'k8s-cluster:children':
|
||||
if self.yaml_config['all']['children'][group]['hosts'] is None:
|
||||
self.yaml_config['all']['children'][group]['hosts'] = {
|
||||
host: None}
|
||||
else:
|
||||
self.yaml_config['all']['children'][group]['hosts'][host] = None # noqa
|
||||
|
||||
def set_kube_control_plane(self, hosts):
|
||||
def set_kube_master(self, hosts):
|
||||
for host in hosts:
|
||||
self.add_host_to_group('kube_control_plane', host)
|
||||
self.add_host_to_group('kube-master', host)
|
||||
|
||||
def set_all(self, hosts):
|
||||
for host, opts in hosts.items():
|
||||
self.add_host_to_group('all', host, opts)
|
||||
|
||||
def set_k8s_cluster(self):
|
||||
k8s_cluster = {'children': {'kube_control_plane': None,
|
||||
'kube_node': None}}
|
||||
self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster
|
||||
k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
|
||||
self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster
|
||||
|
||||
def set_calico_rr(self, hosts):
|
||||
for host in hosts:
|
||||
if host in self.yaml_config['all']['children']['kube_control_plane']: # noqa
|
||||
self.debug("Not adding {0} to calico_rr group because it "
|
||||
"conflicts with kube_control_plane "
|
||||
"group".format(host))
|
||||
if host in self.yaml_config['all']['children']['kube-master']:
|
||||
self.debug("Not adding {0} to calico-rr group because it "
|
||||
"conflicts with kube-master group".format(host))
|
||||
continue
|
||||
if host in self.yaml_config['all']['children']['kube_node']:
|
||||
self.debug("Not adding {0} to calico_rr group because it "
|
||||
"conflicts with kube_node group".format(host))
|
||||
if host in self.yaml_config['all']['children']['kube-node']:
|
||||
self.debug("Not adding {0} to calico-rr group because it "
|
||||
"conflicts with kube-node group".format(host))
|
||||
continue
|
||||
self.add_host_to_group('calico_rr', host)
|
||||
self.add_host_to_group('calico-rr', host)
|
||||
|
||||
def set_kube_node(self, hosts):
|
||||
for host in hosts:
|
||||
if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
|
||||
if host in self.yaml_config['all']['children']['etcd']['hosts']: # noqa
|
||||
self.debug("Not adding {0} to kube_node group because of "
|
||||
self.debug("Not adding {0} to kube-node group because of "
|
||||
"scale deployment and host is in etcd "
|
||||
"group.".format(host))
|
||||
continue
|
||||
if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD: # noqa
|
||||
if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']: # noqa
|
||||
self.debug("Not adding {0} to kube_node group because of "
|
||||
"scale deployment and host is in "
|
||||
"kube_control_plane group.".format(host))
|
||||
if host in self.yaml_config['all']['children']['kube-master']['hosts']: # noqa
|
||||
self.debug("Not adding {0} to kube-node group because of "
|
||||
"scale deployment and host is in kube-master "
|
||||
"group.".format(host))
|
||||
continue
|
||||
self.add_host_to_group('kube_node', host)
|
||||
self.add_host_to_group('kube-node', host)
|
||||
|
||||
def set_etcd(self, hosts):
|
||||
for host in hosts:
|
||||
|
@ -435,11 +389,9 @@ help - Display this message
|
|||
print_cfg - Write inventory file to stdout
|
||||
print_ips - Write a space-delimited list of IPs from "all" group
|
||||
print_hostnames - Write a space-delimited list of Hostnames from "all" group
|
||||
add - Adds specified hosts into an already existing inventory
|
||||
|
||||
Advanced usage:
|
||||
Create new or overwrite old inventory file: inventory.py 10.10.1.5
|
||||
Add another host after initial creation: inventory.py add 10.10.1.6
|
||||
Add another host after initial creation: inventory.py 10.10.1.5
|
||||
Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
|
||||
Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3
|
||||
Add hosts with a specific hostname, ip, and optional access ip: first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
|
||||
|
@ -450,9 +402,9 @@ Configurable env vars:
|
|||
DEBUG Enable debug printing. Default: True
|
||||
CONFIG_FILE File to write config to Default: ./inventory/sample/hosts.yaml
|
||||
HOST_PREFIX Host prefix for generated hosts. Default: node
|
||||
KUBE_CONTROL_HOSTS Set the number of kube-control-planes. Default: 2
|
||||
KUBE_MASTERS Set the number of kube-masters. Default: 2
|
||||
SCALE_THRESHOLD Separate ETCD role if # of nodes >= 50
|
||||
MASSIVE_SCALE_THRESHOLD Separate K8s control-plane and ETCD if # of nodes >= 200
|
||||
MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
|
||||
''' # noqa
|
||||
print(help_text)
|
||||
|
||||
|
@ -473,7 +425,6 @@ def main(argv=None):
|
|||
if not argv:
|
||||
argv = sys.argv[1:]
|
||||
KubesprayInventory(argv, CONFIG_FILE)
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -13,9 +13,8 @@
|
|||
# under the License.
|
||||
|
||||
import inventory
|
||||
from io import StringIO
|
||||
import mock
|
||||
import unittest
|
||||
from unittest import mock
|
||||
|
||||
from collections import OrderedDict
|
||||
import sys
|
||||
|
@ -27,28 +26,6 @@ if path not in sys.path:
|
|||
import inventory # noqa
|
||||
|
||||
|
||||
class TestInventoryPrintHostnames(unittest.TestCase):
|
||||
|
||||
@mock.patch('ruamel.yaml.YAML.load')
|
||||
def test_print_hostnames(self, load_mock):
|
||||
mock_io = mock.mock_open(read_data='')
|
||||
load_mock.return_value = OrderedDict({'all': {'hosts': {
|
||||
'node1': {'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'},
|
||||
'node2': {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'}}}})
|
||||
with mock.patch('builtins.open', mock_io):
|
||||
with self.assertRaises(SystemExit) as cm:
|
||||
with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
|
||||
inventory.KubesprayInventory(
|
||||
changed_hosts=["print_hostnames"],
|
||||
config_file="file")
|
||||
self.assertEqual("node1 node2\n", stdout.getvalue())
|
||||
self.assertEqual(cm.exception.code, 0)
|
||||
|
||||
|
||||
class TestInventory(unittest.TestCase):
|
||||
@mock.patch('inventory.sys')
|
||||
def setUp(self, sys_mock):
|
||||
|
@ -90,14 +67,23 @@ class TestInventory(unittest.TestCase):
|
|||
self.assertRaisesRegex(ValueError, "Host name must end in an",
|
||||
self.inv.get_host_id, hostname)
|
||||
|
||||
def test_build_hostnames_add_one(self):
|
||||
changed_hosts = ['10.90.0.2']
|
||||
expected = OrderedDict([('node1',
|
||||
{'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'})])
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_add_duplicate(self):
|
||||
changed_hosts = ['10.90.0.2']
|
||||
expected = OrderedDict([('node3',
|
||||
expected = OrderedDict([('node1',
|
||||
{'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'})])
|
||||
self.inv.yaml_config['all']['hosts'] = expected
|
||||
result = self.inv.build_hostnames(changed_hosts, True)
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_add_two(self):
|
||||
|
@ -113,30 +99,6 @@ class TestInventory(unittest.TestCase):
|
|||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_add_three(self):
|
||||
changed_hosts = ['10.90.0.2', '10.90.0.3', '10.90.0.4']
|
||||
expected = OrderedDict([
|
||||
('node1', {'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'}),
|
||||
('node2', {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'}),
|
||||
('node3', {'ansible_host': '10.90.0.4',
|
||||
'ip': '10.90.0.4',
|
||||
'access_ip': '10.90.0.4'})])
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_add_one(self):
|
||||
changed_hosts = ['10.90.0.2']
|
||||
expected = OrderedDict([('node1',
|
||||
{'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'})])
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_delete_first(self):
|
||||
changed_hosts = ['-10.90.0.2']
|
||||
existing_hosts = OrderedDict([
|
||||
|
@ -151,24 +113,7 @@ class TestInventory(unittest.TestCase):
|
|||
('node2', {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'})])
|
||||
result = self.inv.build_hostnames(changed_hosts, True)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_delete_by_hostname(self):
|
||||
changed_hosts = ['-node1']
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', {'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'}),
|
||||
('node2', {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'})])
|
||||
self.inv.yaml_config['all']['hosts'] = existing_hosts
|
||||
expected = OrderedDict([
|
||||
('node2', {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'})])
|
||||
result = self.inv.build_hostnames(changed_hosts, True)
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_exists_hostname_positive(self):
|
||||
|
@ -277,11 +222,11 @@ class TestInventory(unittest.TestCase):
|
|||
self.inv.yaml_config['all']['children'][group]['hosts'].get(host),
|
||||
None)
|
||||
|
||||
def test_set_kube_control_plane(self):
|
||||
group = 'kube_control_plane'
|
||||
def test_set_kube_master(self):
|
||||
group = 'kube-master'
|
||||
host = 'node1'
|
||||
|
||||
self.inv.set_kube_control_plane([host])
|
||||
self.inv.set_kube_master([host])
|
||||
self.assertIn(
|
||||
host, self.inv.yaml_config['all']['children'][group]['hosts'])
|
||||
|
||||
|
@ -296,8 +241,8 @@ class TestInventory(unittest.TestCase):
|
|||
self.inv.yaml_config['all']['hosts'].get(host), opt)
|
||||
|
||||
def test_set_k8s_cluster(self):
|
||||
group = 'k8s_cluster'
|
||||
expected_hosts = ['kube_node', 'kube_control_plane']
|
||||
group = 'k8s-cluster'
|
||||
expected_hosts = ['kube-node', 'kube-master']
|
||||
|
||||
self.inv.set_k8s_cluster()
|
||||
for host in expected_hosts:
|
||||
|
@ -306,7 +251,7 @@ class TestInventory(unittest.TestCase):
|
|||
self.inv.yaml_config['all']['children'][group]['children'])
|
||||
|
||||
def test_set_kube_node(self):
|
||||
group = 'kube_node'
|
||||
group = 'kube-node'
|
||||
host = 'node1'
|
||||
|
||||
self.inv.set_kube_node([host])
|
||||
|
@ -330,12 +275,12 @@ class TestInventory(unittest.TestCase):
|
|||
|
||||
self.inv.set_all(hosts)
|
||||
self.inv.set_etcd(list(hosts.keys())[0:3])
|
||||
self.inv.set_kube_control_plane(list(hosts.keys())[0:2])
|
||||
self.inv.set_kube_master(list(hosts.keys())[0:2])
|
||||
self.inv.set_kube_node(hosts.keys())
|
||||
for h in range(3):
|
||||
self.assertFalse(
|
||||
list(hosts.keys())[h] in
|
||||
self.inv.yaml_config['all']['children']['kube_node']['hosts'])
|
||||
self.inv.yaml_config['all']['children']['kube-node']['hosts'])
|
||||
|
||||
def test_scale_scenario_two(self):
|
||||
num_nodes = 500
|
||||
|
@ -346,12 +291,12 @@ class TestInventory(unittest.TestCase):
|
|||
|
||||
self.inv.set_all(hosts)
|
||||
self.inv.set_etcd(list(hosts.keys())[0:3])
|
||||
self.inv.set_kube_control_plane(list(hosts.keys())[3:5])
|
||||
self.inv.set_kube_master(list(hosts.keys())[3:5])
|
||||
self.inv.set_kube_node(hosts.keys())
|
||||
for h in range(5):
|
||||
self.assertFalse(
|
||||
list(hosts.keys())[h] in
|
||||
self.inv.yaml_config['all']['children']['kube_node']['hosts'])
|
||||
self.inv.yaml_config['all']['children']['kube-node']['hosts'])
|
||||
|
||||
def test_range2ips_range(self):
|
||||
changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
|
||||
|
@ -368,7 +313,7 @@ class TestInventory(unittest.TestCase):
|
|||
self.assertRaisesRegex(Exception, "Range of ip_addresses isn't valid",
|
||||
self.inv.range2ips, host_range)
|
||||
|
||||
def test_build_hostnames_create_with_one_different_ips(self):
|
||||
def test_build_hostnames_different_ips_add_one(self):
|
||||
changed_hosts = ['10.90.0.2,192.168.0.2']
|
||||
expected = OrderedDict([('node1',
|
||||
{'ansible_host': '192.168.0.2',
|
||||
|
@ -377,7 +322,17 @@ class TestInventory(unittest.TestCase):
|
|||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_create_with_two_different_ips(self):
|
||||
def test_build_hostnames_different_ips_add_duplicate(self):
|
||||
changed_hosts = ['10.90.0.2,192.168.0.2']
|
||||
expected = OrderedDict([('node1',
|
||||
{'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'})])
|
||||
self.inv.yaml_config['all']['hosts'] = expected
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_different_ips_add_two(self):
|
||||
changed_hosts = ['10.90.0.2,192.168.0.2', '10.90.0.3,192.168.0.3']
|
||||
expected = OrderedDict([
|
||||
('node1', {'ansible_host': '192.168.0.2',
|
||||
|
@ -386,210 +341,6 @@ class TestInventory(unittest.TestCase):
|
|||
('node2', {'ansible_host': '192.168.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '192.168.0.3'})])
|
||||
self.inv.yaml_config['all']['hosts'] = OrderedDict()
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_create_with_three_different_ips(self):
|
||||
changed_hosts = ['10.90.0.2,192.168.0.2',
|
||||
'10.90.0.3,192.168.0.3',
|
||||
'10.90.0.4,192.168.0.4']
|
||||
expected = OrderedDict([
|
||||
('node1', {'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'}),
|
||||
('node2', {'ansible_host': '192.168.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '192.168.0.3'}),
|
||||
('node3', {'ansible_host': '192.168.0.4',
|
||||
'ip': '10.90.0.4',
|
||||
'access_ip': '192.168.0.4'})])
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_overwrite_one_with_different_ips(self):
|
||||
changed_hosts = ['10.90.0.2,192.168.0.2']
|
||||
expected = OrderedDict([('node1',
|
||||
{'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'})])
|
||||
existing = OrderedDict([('node5',
|
||||
{'ansible_host': '192.168.0.5',
|
||||
'ip': '10.90.0.5',
|
||||
'access_ip': '192.168.0.5'})])
|
||||
self.inv.yaml_config['all']['hosts'] = existing
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_overwrite_three_with_different_ips(self):
|
||||
changed_hosts = ['10.90.0.2,192.168.0.2']
|
||||
expected = OrderedDict([('node1',
|
||||
{'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'})])
|
||||
existing = OrderedDict([
|
||||
('node3', {'ansible_host': '192.168.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '192.168.0.3'}),
|
||||
('node4', {'ansible_host': '192.168.0.4',
|
||||
'ip': '10.90.0.4',
|
||||
'access_ip': '192.168.0.4'}),
|
||||
('node5', {'ansible_host': '192.168.0.5',
|
||||
'ip': '10.90.0.5',
|
||||
'access_ip': '192.168.0.5'})])
|
||||
self.inv.yaml_config['all']['hosts'] = existing
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_different_ips_add_duplicate(self):
|
||||
changed_hosts = ['10.90.0.2,192.168.0.2']
|
||||
expected = OrderedDict([('node3',
|
||||
{'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'})])
|
||||
existing = expected
|
||||
self.inv.yaml_config['all']['hosts'] = existing
|
||||
result = self.inv.build_hostnames(changed_hosts, True)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_add_two_different_ips_into_one_existing(self):
|
||||
changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
|
||||
expected = OrderedDict([
|
||||
('node2', {'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'}),
|
||||
('node3', {'ansible_host': '192.168.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '192.168.0.3'}),
|
||||
('node4', {'ansible_host': '192.168.0.4',
|
||||
'ip': '10.90.0.4',
|
||||
'access_ip': '192.168.0.4'})])
|
||||
|
||||
existing = OrderedDict([
|
||||
('node2', {'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'})])
|
||||
self.inv.yaml_config['all']['hosts'] = existing
|
||||
result = self.inv.build_hostnames(changed_hosts, True)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_add_two_different_ips_into_two_existing(self):
|
||||
changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
|
||||
expected = OrderedDict([
|
||||
('node2', {'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'}),
|
||||
('node3', {'ansible_host': '192.168.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '192.168.0.3'}),
|
||||
('node4', {'ansible_host': '192.168.0.4',
|
||||
'ip': '10.90.0.4',
|
||||
'access_ip': '192.168.0.4'}),
|
||||
('node5', {'ansible_host': '192.168.0.5',
|
||||
'ip': '10.90.0.5',
|
||||
'access_ip': '192.168.0.5'})])
|
||||
|
||||
existing = OrderedDict([
|
||||
('node2', {'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'}),
|
||||
('node3', {'ansible_host': '192.168.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '192.168.0.3'})])
|
||||
self.inv.yaml_config['all']['hosts'] = existing
|
||||
result = self.inv.build_hostnames(changed_hosts, True)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_add_two_different_ips_into_three_existing(self):
|
||||
changed_hosts = ['10.90.0.5,192.168.0.5', '10.90.0.6,192.168.0.6']
|
||||
expected = OrderedDict([
|
||||
('node2', {'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'}),
|
||||
('node3', {'ansible_host': '192.168.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '192.168.0.3'}),
|
||||
('node4', {'ansible_host': '192.168.0.4',
|
||||
'ip': '10.90.0.4',
|
||||
'access_ip': '192.168.0.4'}),
|
||||
('node5', {'ansible_host': '192.168.0.5',
|
||||
'ip': '10.90.0.5',
|
||||
'access_ip': '192.168.0.5'}),
|
||||
('node6', {'ansible_host': '192.168.0.6',
|
||||
'ip': '10.90.0.6',
|
||||
'access_ip': '192.168.0.6'})])
|
||||
|
||||
existing = OrderedDict([
|
||||
('node2', {'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'}),
|
||||
('node3', {'ansible_host': '192.168.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '192.168.0.3'}),
|
||||
('node4', {'ansible_host': '192.168.0.4',
|
||||
'ip': '10.90.0.4',
|
||||
'access_ip': '192.168.0.4'})])
|
||||
self.inv.yaml_config['all']['hosts'] = existing
|
||||
result = self.inv.build_hostnames(changed_hosts, True)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
# Add two IP addresses into a config that has
|
||||
# three already defined IP addresses. One of the IP addresses
|
||||
# is a duplicate.
|
||||
def test_build_hostnames_add_two_duplicate_one_overlap(self):
|
||||
changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
|
||||
expected = OrderedDict([
|
||||
('node2', {'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'}),
|
||||
('node3', {'ansible_host': '192.168.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '192.168.0.3'}),
|
||||
('node4', {'ansible_host': '192.168.0.4',
|
||||
'ip': '10.90.0.4',
|
||||
'access_ip': '192.168.0.4'}),
|
||||
('node5', {'ansible_host': '192.168.0.5',
|
||||
'ip': '10.90.0.5',
|
||||
'access_ip': '192.168.0.5'})])
|
||||
|
||||
existing = OrderedDict([
|
||||
('node2', {'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'}),
|
||||
('node3', {'ansible_host': '192.168.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '192.168.0.3'}),
|
||||
('node4', {'ansible_host': '192.168.0.4',
|
||||
'ip': '10.90.0.4',
|
||||
'access_ip': '192.168.0.4'})])
|
||||
self.inv.yaml_config['all']['hosts'] = existing
|
||||
result = self.inv.build_hostnames(changed_hosts, True)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
# Add two duplicate IP addresses into a config that has
|
||||
# three already defined IP addresses
|
||||
def test_build_hostnames_add_two_duplicate_two_overlap(self):
|
||||
changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
|
||||
expected = OrderedDict([
|
||||
('node2', {'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'}),
|
||||
('node3', {'ansible_host': '192.168.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '192.168.0.3'}),
|
||||
('node4', {'ansible_host': '192.168.0.4',
|
||||
'ip': '10.90.0.4',
|
||||
'access_ip': '192.168.0.4'})])
|
||||
|
||||
existing = OrderedDict([
|
||||
('node2', {'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'}),
|
||||
('node3', {'ansible_host': '192.168.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '192.168.0.3'}),
|
||||
('node4', {'ansible_host': '192.168.0.4',
|
||||
'ip': '10.90.0.4',
|
||||
'access_ip': '192.168.0.4'})])
|
||||
self.inv.yaml_config['all']['hosts'] = existing
|
||||
result = self.inv.build_hostnames(changed_hosts, True)
|
||||
self.assertEqual(expected, result)
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
|
||||
- name: Install required packages
|
||||
package:
|
||||
yum:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
with_items:
|
||||
|
|
|
@ -28,7 +28,7 @@
|
|||
sysctl:
|
||||
name: net.ipv4.ip_forward
|
||||
value: 1
|
||||
sysctl_file: "{{ sysctl_file_path }}"
|
||||
sysctl_file: /etc/sysctl.d/ipv4-ip_forward.conf
|
||||
state: present
|
||||
reload: yes
|
||||
|
||||
|
@ -37,7 +37,7 @@
|
|||
name: "{{ item }}"
|
||||
state: present
|
||||
value: 0
|
||||
sysctl_file: "{{ sysctl_file_path }}"
|
||||
sysctl_file: /etc/sysctl.d/bridge-nf-call.conf
|
||||
reload: yes
|
||||
with_items:
|
||||
- net.bridge.bridge-nf-call-arptables
|
||||
|
|
|
@ -11,7 +11,6 @@
|
|||
state: directory
|
||||
owner: "{{ k8s_deployment_user }}"
|
||||
group: "{{ k8s_deployment_user }}"
|
||||
mode: 0700
|
||||
|
||||
- name: Configure sudo for deployment user
|
||||
copy:
|
||||
|
|
|
@ -15,10 +15,10 @@
|
|||
roles:
|
||||
- { role: glusterfs/server }
|
||||
|
||||
- hosts: k8s_cluster
|
||||
- hosts: k8s-cluster
|
||||
roles:
|
||||
- { role: glusterfs/client }
|
||||
|
||||
- hosts: kube_control_plane[0]
|
||||
- hosts: kube-master[0]
|
||||
roles:
|
||||
- { role: kubernetes-pv }
|
||||
|
|
|
@ -11,10 +11,10 @@
|
|||
# ## Set disk_volume_device_1 to desired device for gluster brick, if different to /dev/vdb (default).
|
||||
# ## As in the previous case, you can set ip to give direct communication on internal IPs
|
||||
# gfs_node1 ansible_ssh_host=95.54.0.18 # disk_volume_device_1=/dev/vdc ip=10.3.0.7
|
||||
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
|
||||
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
|
||||
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
|
||||
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
|
||||
|
||||
# [kube_control_plane]
|
||||
# [kube-master]
|
||||
# node1
|
||||
# node2
|
||||
|
||||
|
@ -23,16 +23,16 @@
|
|||
# node2
|
||||
# node3
|
||||
|
||||
# [kube_node]
|
||||
# [kube-node]
|
||||
# node2
|
||||
# node3
|
||||
# node4
|
||||
# node5
|
||||
# node6
|
||||
|
||||
# [k8s_cluster:children]
|
||||
# kube_node
|
||||
# kube_control_plane
|
||||
# [k8s-cluster:children]
|
||||
# kube-node
|
||||
# kube-master
|
||||
|
||||
# [gfs-cluster]
|
||||
# gfs_node1
|
||||
|
|
|
@ -8,22 +8,18 @@ Installs and configures GlusterFS on Linux.
|
|||
|
||||
For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (that port, plus an additional incremented port for each additional server in the cluster; the latter if GlusterFS is version 3.4+), and TCP/UDP port `111` must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role).
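
As an illustrative sketch only (firewalld is just one option and is not configured by this role), the ports above could be opened roughly like this; the `49152`+ range shown is an assumption and should be sized to the number of servers/bricks in your cluster:

```shell
# Open GlusterFS management and brick ports plus portmapper (111), then reload.
firewall-cmd --permanent --add-port=24007-24009/tcp
firewall-cmd --permanent --add-port=49152-49156/tcp   # one port per brick, counting up from 49152
firewall-cmd --permanent --add-port=111/tcp --add-port=111/udp
firewall-cmd --reload
```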
|
||||
|
||||
This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/ansible/latest/collections/gluster/gluster/gluster_volume_module.html) module to ease the management of Gluster volumes.
|
||||
This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/gluster_volume_module.html) module to ease the management of Gluster volumes.
|
||||
|
||||
## Role Variables
|
||||
|
||||
Available variables are listed below, along with default values (see `defaults/main.yml`):
|
||||
|
||||
```yaml
|
||||
glusterfs_default_release: ""
|
||||
```
|
||||
glusterfs_default_release: ""
|
||||
|
||||
You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
|
||||
|
||||
```yaml
|
||||
glusterfs_ppa_use: yes
|
||||
glusterfs_ppa_version: "3.5"
|
||||
```
|
||||
glusterfs_ppa_use: yes
|
||||
glusterfs_ppa_version: "3.5"
|
||||
|
||||
For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.
|
||||
|
||||
|
@ -33,11 +29,9 @@ None.
|
|||
|
||||
## Example Playbook
|
||||
|
||||
```yaml
|
||||
- hosts: server
|
||||
roles:
|
||||
- geerlingguy.glusterfs
|
||||
```
|
||||
|
||||
For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).
|
||||
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
---
|
||||
- name: Install Prerequisites
|
||||
package: name={{ item }} state=present
|
||||
yum: name={{ item }} state=present
|
||||
with_items:
|
||||
- "centos-release-gluster{{ glusterfs_default_release }}"
|
||||
|
||||
- name: Install Packages
|
||||
package: name={{ item }} state=present
|
||||
yum: name={{ item }} state=present
|
||||
with_items:
|
||||
- glusterfs-client
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
when: ansible_os_family == "Debian"
|
||||
|
||||
- name: install xfs RedHat
|
||||
package: name=xfsprogs state=present
|
||||
yum: name=xfsprogs state=present
|
||||
when: ansible_os_family == "RedHat"
|
||||
|
||||
# Format external volumes in xfs
|
||||
|
@ -82,7 +82,6 @@
|
|||
template:
|
||||
dest: "{{ gluster_mount_dir }}/.test-file.txt"
|
||||
src: test-file.txt
|
||||
mode: 0644
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Unmount glusterfs
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
---
|
||||
- name: Install Prerequisites
|
||||
package: name={{ item }} state=present
|
||||
yum: name={{ item }} state=present
|
||||
with_items:
|
||||
- "centos-release-gluster{{ glusterfs_default_release }}"
|
||||
|
||||
- name: Install Packages
|
||||
package: name={{ item }} state=present
|
||||
yum: name={{ item }} state=present
|
||||
with_items:
|
||||
- glusterfs-server
|
||||
- glusterfs-client
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
---
|
||||
- hosts: all
|
||||
|
||||
roles:
|
||||
- { role: prepare }
|
||||
- role_under_test
|
|
@ -3,13 +3,12 @@
|
|||
template:
|
||||
src: "{{ item.file }}"
|
||||
dest: "{{ kube_config_dir }}/{{ item.dest }}"
|
||||
mode: 0644
|
||||
with_items:
|
||||
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
|
||||
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
|
||||
- { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
|
||||
register: gluster_pv
|
||||
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
|
||||
when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
|
||||
|
||||
- name: Kubernetes Apps | Set GlusterFS endpoint and PV
|
||||
kube:
|
||||
|
@ -20,4 +19,4 @@
|
|||
filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
|
||||
state: "{{ item.changed | ternary('latest','present') }}"
|
||||
with_items: "{{ gluster_pv.results }}"
|
||||
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
|
||||
when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
---
|
||||
- hosts: kube_control_plane[0]
|
||||
- hosts: kube-master[0]
|
||||
roles:
|
||||
- { role: tear-down }
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
roles:
|
||||
- { role: prepare }
|
||||
|
||||
- hosts: kube_control_plane[0]
|
||||
- hosts: kube-master[0]
|
||||
tags:
|
||||
- "provision"
|
||||
roles:
|
||||
|
|
|
@ -2,25 +2,18 @@ all:
|
|||
vars:
|
||||
heketi_admin_key: "11elfeinhundertundelf"
|
||||
heketi_user_key: "!!einseinseins"
|
||||
glusterfs_daemonset:
|
||||
readiness_probe:
|
||||
timeout_seconds: 3
|
||||
initial_delay_seconds: 3
|
||||
liveness_probe:
|
||||
timeout_seconds: 3
|
||||
initial_delay_seconds: 10
|
||||
children:
|
||||
k8s_cluster:
|
||||
k8s-cluster:
|
||||
vars:
|
||||
kubelet_fail_swap_on: false
|
||||
children:
|
||||
kube_control_plane:
|
||||
kube-master:
|
||||
hosts:
|
||||
node1:
|
||||
etcd:
|
||||
hosts:
|
||||
node2:
|
||||
kube_node:
|
||||
kube-node:
|
||||
hosts: &kube_nodes
|
||||
node1:
|
||||
node2:
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
|
||||
- name: "Install glusterfs mount utils (RedHat)"
|
||||
become: true
|
||||
package:
|
||||
yum:
|
||||
name: "glusterfs-fuse"
|
||||
state: "present"
|
||||
when: "ansible_os_family == 'RedHat'"
|
||||
|
|
|
@ -1,10 +1,7 @@
|
|||
---
|
||||
- name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
|
||||
become: true
|
||||
template:
|
||||
src: "heketi-bootstrap.json.j2"
|
||||
dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
|
||||
mode: 0640
|
||||
template: { src: "heketi-bootstrap.json.j2", dest: "{{ kube_config_dir }}/heketi-bootstrap.json" }
|
||||
register: "rendering"
|
||||
- name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
|
||||
kube:
|
||||
|
|
|
@ -10,7 +10,6 @@
|
|||
template:
|
||||
src: "topology.json.j2"
|
||||
dest: "{{ kube_config_dir }}/topology.json"
|
||||
mode: 0644
|
||||
- name: "Copy topology configuration into container."
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
|
||||
|
|
|
@ -1,9 +1,6 @@
|
|||
---
|
||||
- name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
|
||||
template:
|
||||
src: "glusterfs-daemonset.json.j2"
|
||||
dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
|
||||
mode: 0644
|
||||
template: { src: "glusterfs-daemonset.json.j2", dest: "{{ kube_config_dir }}/glusterfs-daemonset.json" }
|
||||
become: true
|
||||
register: "rendering"
|
||||
- name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
|
||||
|
@ -30,10 +27,7 @@
|
|||
delay: 5
|
||||
|
||||
- name: "Kubernetes Apps | Lay Down Heketi Service Account"
|
||||
template:
|
||||
src: "heketi-service-account.json.j2"
|
||||
dest: "{{ kube_config_dir }}/heketi-service-account.json"
|
||||
mode: 0644
|
||||
template: { src: "heketi-service-account.json.j2", dest: "{{ kube_config_dir }}/heketi-service-account.json" }
|
||||
become: true
|
||||
register: "rendering"
|
||||
- name: "Kubernetes Apps | Install and configure Heketi Service Account"
|
||||
|
|
|
@ -4,7 +4,6 @@
|
|||
template:
|
||||
src: "heketi-deployment.json.j2"
|
||||
dest: "{{ kube_config_dir }}/heketi-deployment.json"
|
||||
mode: 0644
|
||||
register: "rendering"
|
||||
|
||||
- name: "Kubernetes Apps | Install and configure Heketi"
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
changed_when: false
|
||||
|
||||
- name: "Kubernetes Apps | Deploy cluster role binding."
|
||||
when: "clusterrolebinding_state.stdout | length == 0"
|
||||
when: "clusterrolebinding_state.stdout == \"\""
|
||||
command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
|
||||
|
||||
- name: Get clusterrolebindings again
|
||||
|
@ -15,7 +15,7 @@
|
|||
|
||||
- name: Make sure that clusterrolebindings are present now
|
||||
assert:
|
||||
that: "clusterrolebinding_state.stdout | length > 0"
|
||||
that: "clusterrolebinding_state.stdout != \"\""
|
||||
msg: "Cluster role binding is not present."
|
||||
|
||||
- name: Get the heketi-config-secret secret
|
||||
|
@ -28,10 +28,9 @@
|
|||
template:
|
||||
src: "heketi.json.j2"
|
||||
dest: "{{ kube_config_dir }}/heketi.json"
|
||||
mode: 0644
|
||||
|
||||
- name: "Deploy Heketi config secret"
|
||||
when: "secret_state.stdout | length == 0"
|
||||
when: "secret_state.stdout == \"\""
|
||||
command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
|
||||
|
||||
- name: Get the heketi-config-secret secret again
|
||||
|
@ -41,5 +40,5 @@
|
|||
|
||||
- name: Make sure the heketi-config-secret secret exists now
|
||||
assert:
|
||||
that: "secret_state.stdout | length > 0"
|
||||
that: "secret_state.stdout != \"\""
|
||||
msg: "Heketi config secret is not present."
|
||||
|
|
|
@ -2,10 +2,7 @@
|
|||
- name: "Kubernetes Apps | Lay Down Heketi Storage"
|
||||
become: true
|
||||
vars: { nodes: "{{ groups['heketi-node'] }}" }
|
||||
template:
|
||||
src: "heketi-storage.json.j2"
|
||||
dest: "{{ kube_config_dir }}/heketi-storage.json"
|
||||
mode: 0644
|
||||
template: { src: "heketi-storage.json.j2", dest: "{{ kube_config_dir }}/heketi-storage.json" }
|
||||
register: "rendering"
|
||||
- name: "Kubernetes Apps | Install and configure Heketi Storage"
|
||||
kube:
|
||||
|
|
|
@ -16,7 +16,6 @@
|
|||
template:
|
||||
src: "storageclass.yml.j2"
|
||||
dest: "{{ kube_config_dir }}/storageclass.yml"
|
||||
mode: 0644
|
||||
register: "rendering"
|
||||
- name: "Kubernetes Apps | Install and configure Storace Class"
|
||||
kube:
|
||||
|
|
|
@ -10,7 +10,6 @@
|
|||
template:
|
||||
src: "topology.json.j2"
|
||||
dest: "{{ kube_config_dir }}/topology.json"
|
||||
mode: 0644
|
||||
- name: "Copy topology configuration into container." # noqa 503
|
||||
when: "rendering.changed"
|
||||
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
|
||||
|
|
|
@ -73,8 +73,8 @@
|
|||
"privileged": true
|
||||
},
|
||||
"readinessProbe": {
|
||||
"timeoutSeconds": {{ glusterfs_daemonset.readiness_probe.timeout_seconds }},
|
||||
"initialDelaySeconds": {{ glusterfs_daemonset.readiness_probe.initial_delay_seconds }},
|
||||
"timeoutSeconds": 3,
|
||||
"initialDelaySeconds": 3,
|
||||
"exec": {
|
||||
"command": [
|
||||
"/bin/bash",
|
||||
|
@ -84,8 +84,8 @@
|
|||
}
|
||||
},
|
||||
"livenessProbe": {
|
||||
"timeoutSeconds": {{ glusterfs_daemonset.liveness_probe.timeout_seconds }},
|
||||
"initialDelaySeconds": {{ glusterfs_daemonset.liveness_probe.initial_delay_seconds }},
|
||||
"timeoutSeconds": 3,
|
||||
"initialDelaySeconds": 10,
|
||||
"exec": {
|
||||
"command": [
|
||||
"/bin/bash",
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
- name: "Install lvm utils (RedHat)"
|
||||
become: true
|
||||
package:
|
||||
yum:
|
||||
name: "lvm2"
|
||||
state: "present"
|
||||
when: "ansible_os_family == 'RedHat'"
|
||||
|
@ -19,7 +19,7 @@
|
|||
become: true
|
||||
shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
|
||||
register: "volume_groups"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
|
||||
- name: "Remove volume groups." # noqa 301
|
||||
|
@ -35,11 +35,11 @@
|
|||
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
||||
become: true
|
||||
command: "pvremove {{ disk_volume_device_1 }} --yes"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
ignore_errors: true
|
||||
|
||||
- name: "Remove lvm utils (RedHat)"
|
||||
become: true
|
||||
package:
|
||||
yum:
|
||||
name: "lvm2"
|
||||
state: "absent"
|
||||
when: "ansible_os_family == 'RedHat' and heketi_remove_lvm"
|
||||
|
|
|
@ -1,51 +1,51 @@
|
|||
---
|
||||
- name: Remove storage class. # noqa 301
|
||||
- name: "Remove storage class." # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete storageclass gluster"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Tear down heketi. # noqa 301
|
||||
ignore_errors: true
|
||||
- name: "Tear down heketi." # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Tear down heketi. # noqa 301
|
||||
ignore_errors: true
|
||||
- name: "Tear down heketi." # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Tear down bootstrap.
|
||||
ignore_errors: true
|
||||
- name: "Tear down bootstrap."
|
||||
include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
|
||||
- name: Ensure there is nothing left over. # noqa 301
|
||||
- name: "Ensure there is nothing left over." # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
|
||||
register: "heketi_result"
|
||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
||||
retries: 60
|
||||
delay: 5
|
||||
- name: Ensure there is nothing left over. # noqa 301
|
||||
- name: "Ensure there is nothing left over." # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
|
||||
register: "heketi_result"
|
||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
||||
retries: 60
|
||||
delay: 5
|
||||
- name: Tear down glusterfs. # noqa 301
|
||||
- name: "Tear down glusterfs." # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi storage service. # noqa 301
|
||||
ignore_errors: true
|
||||
- name: "Remove heketi storage service." # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi gluster role binding # noqa 301
|
||||
ignore_errors: true
|
||||
- name: "Remove heketi gluster role binding" # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi config secret # noqa 301
|
||||
ignore_errors: true
|
||||
- name: "Remove heketi config secret" # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi db backup # noqa 301
|
||||
ignore_errors: true
|
||||
- name: "Remove heketi db backup" # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi service account # noqa 301
|
||||
ignore_errors: true
|
||||
- name: "Remove heketi service account" # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Get secrets
|
||||
ignore_errors: true
|
||||
- name: "Get secrets"
|
||||
command: "{{ bin_dir }}/kubectl get secrets --output=\"json\""
|
||||
register: "secrets"
|
||||
changed_when: false
|
||||
- name: Remove heketi storage secret
|
||||
- name: "Remove heketi storage secret"
|
||||
vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
|
||||
command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
|
||||
when: "storage_query is defined"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
ignore_errors: true
|
||||
|
|
|
@ -1,16 +1,11 @@
|
|||
# Offline deployment
|
||||
|
||||
## manage-offline-container-images.sh
|
||||
|
||||
Container image collecting script for offline deployment
|
||||
# Container image collecting script for offline deployment
|
||||
|
||||
This script has two features:
|
||||
(1) Get container images from an environment which is deployed online.
|
||||
(2) Deploy a local container registry and register the container images to the registry.
|
||||
|
||||
Step(1) should be done on an online site as preparation; then we bring the collected images
to the target offline environment. If the images come from a private registry,
you need to set the `PRIVATE_REGISTRY` environment variable.
|
||||
to the target offline environment.
|
||||
Then we will run step(2) to register the images in the local registry.
|
||||
|
||||
Step(1) can be operated with:
|
||||
|
@ -24,42 +19,3 @@ Step(2) can be operated with:
|
|||
```shell
|
||||
manage-offline-container-images.sh register
|
||||
```
|
||||
|
||||
## generate_list.sh
|
||||
|
||||
This script generates the list of downloaded files and the list of container images from the `roles/download/defaults/main.yml` file.
|
||||
|
||||
Running this script will execute the `generate_list.yml` playbook in the kubespray root directory and generate four files:
all downloaded file URLs in `files.list`, all container images in `images.list`, and the Jinja2 templates in `*.template`.
|
||||
|
||||
```shell
|
||||
./generate_list.sh
|
||||
tree temp
|
||||
temp
|
||||
├── files.list
|
||||
├── files.list.template
|
||||
├── images.list
|
||||
└── images.list.template
|
||||
0 directories, 4 files
|
||||
```
|
||||
|
||||
In some cases you may want to update a component version. You can declare version variables in the Ansible inventory file or in group_vars,
then run `./generate_list.sh -i [inventory_file]` to update `files.list` and `images.list`.
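For instance, a small group_vars file pinning a couple of versions before regenerating the lists could look like the sketch below (the variable names must match those used in `roles/download/defaults/main.yml`; the path and versions shown are placeholders only):

```yaml
# inventory/mycluster/group_vars/all/versions.yml  (hypothetical path)
kube_version: v1.23.7   # placeholder version
etcd_version: v3.5.3    # placeholder version
```

Then `./generate_list.sh -i inventory/mycluster/hosts.yml` regenerates `files.list` and `images.list` with the pinned versions.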
|
||||
|
||||
## manage-offline-files.sh
|
||||
|
||||
This script will download all files according to `temp/files.list` and run an nginx container to serve them for offline download.
|
||||
|
||||
Step(1) generate `files.list`
|
||||
|
||||
```shell
|
||||
./generate_list.sh
|
||||
```
|
||||
|
||||
Step(2) download files and run nginx container
|
||||
|
||||
```shell
|
||||
./manage-offline-files.sh
|
||||
```
|
||||
|
||||
When the nginx container is running, the downloaded files can be accessed at <http://127.0.0.1:8080/>.
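If Kubespray itself should download from this server, the offline download variables can then be pointed at it, roughly as in the sketch below (the `files_repo` name follows Kubespray's offline documentation, but verify the exact `*_download_url` variables and paths against your Kubespray version and the directory tree wget created under `offline-files/`):

```yaml
# group_vars/all/offline.yml  (sketch)
files_repo: "http://myserver:8080"
# one example override; each *_download_url you need gets a similar line
kubeadm_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubeadm"
```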
|
||||
|
|
|
@ -1,33 +0,0 @@
|
|||
#!/bin/bash
|
||||
set -eo pipefail
|
||||
|
||||
CURRENT_DIR=$(cd $(dirname $0); pwd)
|
||||
TEMP_DIR="${CURRENT_DIR}/temp"
|
||||
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"
|
||||
|
||||
: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}
|
||||
|
||||
mkdir -p ${TEMP_DIR}
|
||||
|
||||
# generate all download files url template
|
||||
grep 'download_url:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
|
||||
| sed 's/^.*_url: //g;s/\"//g' > ${TEMP_DIR}/files.list.template
|
||||
|
||||
# generate all images list template
|
||||
sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
|
||||
| sed -n "s/repo: //p;s/tag: //p" | tr -d ' ' \
|
||||
| sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template
|
||||
|
||||
# add kube-* images to images list template
|
||||
# Those container images are downloaded by kubeadm, so roles/download/defaults/main.yml
# doesn't contain them. That is why they need to be added to the
# list separately.
|
||||
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
|
||||
for i in $KUBE_IMAGES; do
|
||||
echo "{{ kube_image_repo }}/$i:{{ kube_version }}" >> ${TEMP_DIR}/images.list.template
|
||||
done
|
||||
|
||||
# run ansible to expand templates
|
||||
/bin/cp ${CURRENT_DIR}/generate_list.yml ${REPO_ROOT_DIR}
|
||||
|
||||
(cd ${REPO_ROOT_DIR} && ansible-playbook $* generate_list.yml && /bin/rm generate_list.yml) || exit 1
|
|
@ -1,19 +0,0 @@
|
|||
---
|
||||
- hosts: localhost
|
||||
become: no
|
||||
|
||||
roles:
|
||||
# Just load default variables from roles.
|
||||
- role: kubespray-defaults
|
||||
when: false
|
||||
- role: download
|
||||
when: false
|
||||
|
||||
tasks:
|
||||
# Generate files.list and images.list files from templates.
|
||||
- template:
|
||||
src: ./contrib/offline/temp/{{ item }}.list.template
|
||||
dest: ./contrib/offline/temp/{{ item }}.list
|
||||
with_items:
|
||||
- files
|
||||
- images
|
|
@ -15,7 +15,7 @@ function create_container_image_tar() {
|
|||
IMAGES=$(kubectl describe pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq)
|
||||
# NOTE: etcd and pause cannot be seen as pods.
|
||||
# The pause image is used for --pod-infra-container-image option of kubelet.
|
||||
EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|registry.k8s.io/pause:" | sed s@\"@@g)
|
||||
EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|k8s.gcr.io/pause:" | sed s@\"@@g)
|
||||
IMAGES="${IMAGES} ${EXT_IMAGES}"
|
||||
|
||||
rm -f ${IMAGE_TAR_FILE}
|
||||
|
@ -46,16 +46,15 @@ function create_container_image_tar() {
|
|||
|
||||
# NOTE: Here removes the following repo parts from each image
|
||||
# so that these parts will be replaced with Kubespray.
|
||||
# - kube_image_repo: "registry.k8s.io"
|
||||
# - kube_image_repo: "k8s.gcr.io"
|
||||
# - gcr_image_repo: "gcr.io"
|
||||
# - docker_image_repo: "docker.io"
|
||||
# - quay_image_repo: "quay.io"
|
||||
FIRST_PART=$(echo ${image} | awk -F"/" '{print $1}')
|
||||
if [ "${FIRST_PART}" = "registry.k8s.io" ] ||
|
||||
if [ "${FIRST_PART}" = "k8s.gcr.io" ] ||
|
||||
[ "${FIRST_PART}" = "gcr.io" ] ||
|
||||
[ "${FIRST_PART}" = "docker.io" ] ||
|
||||
[ "${FIRST_PART}" = "quay.io" ] ||
|
||||
[ "${FIRST_PART}" = "${PRIVATE_REGISTRY}" ]; then
|
||||
[ "${FIRST_PART}" = "quay.io" ]; then
|
||||
image=$(echo ${image} | sed s@"${FIRST_PART}/"@@)
|
||||
fi
|
||||
echo "${FILE_NAME} ${image}" >> ${IMAGE_LIST}
|
||||
|
@ -101,35 +100,15 @@ function register_container_images() {
|
|||
|
||||
tar -zxvf ${IMAGE_TAR_FILE}
|
||||
sudo docker load -i ${IMAGE_DIR}/registry-latest.tar
|
||||
sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
|
||||
set +e
|
||||
sudo docker container inspect registry >/dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
|
||||
fi
|
||||
set -e
|
||||
|
||||
set -e
|
||||
while read -r line; do
|
||||
file_name=$(echo ${line} | awk '{print $1}')
|
||||
raw_image=$(echo ${line} | awk '{print $2}')
|
||||
new_image="${LOCALHOST_NAME}:5000/${raw_image}"
|
||||
org_image=$(sudo docker load -i ${IMAGE_DIR}/${file_name} | head -n1 | awk '{print $3}')
|
||||
image_id=$(sudo docker image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//)
|
||||
if [ -z "${file_name}" ]; then
|
||||
echo "Failed to get file_name for line ${line}"
|
||||
exit 1
|
||||
fi
|
||||
if [ -z "${raw_image}" ]; then
|
||||
echo "Failed to get raw_image for line ${line}"
|
||||
exit 1
|
||||
fi
|
||||
if [ -z "${org_image}" ]; then
|
||||
echo "Failed to get org_image for line ${line}"
|
||||
exit 1
|
||||
fi
|
||||
if [ -z "${image_id}" ]; then
|
||||
echo "Failed to get image_id for file ${file_name}"
|
||||
exit 1
|
||||
fi
|
||||
org_image=$(echo ${line} | awk '{print $2}')
|
||||
new_image="${LOCALHOST_NAME}:5000/${org_image}"
|
||||
image_id=$(tar -tf ${IMAGE_DIR}/${file_name} | grep "\.json" | grep -v manifest.json | sed s/"\.json"//)
|
||||
sudo docker load -i ${IMAGE_DIR}/${file_name}
|
||||
sudo docker tag ${image_id} ${new_image}
|
||||
sudo docker push ${new_image}
|
||||
|
@ -153,8 +132,7 @@ else
|
|||
echo "(2) Deploy local container registry and register the container images to the registry."
|
||||
echo ""
|
||||
echo "Step(1) should be done online site as a preparation, then we bring"
|
||||
echo "the gotten images to the target offline environment. if images are from"
|
||||
echo "a private registry, you need to set PRIVATE_REGISTRY environment variable."
|
||||
echo "the gotten images to the target offline environment."
|
||||
echo "Then we will run step(2) for registering the images to local registry."
|
||||
echo ""
|
||||
echo "${IMAGE_TAR_FILE} is created to contain your container images."
|
||||
|
|
|
@ -1,44 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
CURRENT_DIR=$( dirname "$(readlink -f "$0")" )
|
||||
OFFLINE_FILES_DIR_NAME="offline-files"
|
||||
OFFLINE_FILES_DIR="${CURRENT_DIR}/${OFFLINE_FILES_DIR_NAME}"
|
||||
OFFLINE_FILES_ARCHIVE="${CURRENT_DIR}/offline-files.tar.gz"
|
||||
FILES_LIST=${FILES_LIST:-"${CURRENT_DIR}/temp/files.list"}
|
||||
NGINX_PORT=8080
|
||||
|
||||
# download files
|
||||
if [ ! -f "${FILES_LIST}" ]; then
|
||||
echo "${FILES_LIST} should exist, run ./generate_list.sh first."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -rf "${OFFLINE_FILES_DIR}"
|
||||
rm "${OFFLINE_FILES_ARCHIVE}"
|
||||
mkdir "${OFFLINE_FILES_DIR}"
|
||||
|
||||
wget -x -P "${OFFLINE_FILES_DIR}" -i "${FILES_LIST}"
|
||||
tar -czvf "${OFFLINE_FILES_ARCHIVE}" "${OFFLINE_FILES_DIR_NAME}"
|
||||
|
||||
[ -n "$NO_HTTP_SERVER" ] && echo "skip to run nginx" && exit 0
|
||||
|
||||
# run nginx container server
|
||||
if command -v nerdctl 1>/dev/null 2>&1; then
|
||||
runtime="nerdctl"
|
||||
elif command -v podman 1>/dev/null 2>&1; then
|
||||
runtime="podman"
|
||||
elif command -v docker 1>/dev/null 2>&1; then
|
||||
runtime="docker"
|
||||
else
|
||||
echo "No supported container runtime found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sudo "${runtime}" container inspect nginx >/dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
sudo "${runtime}" run \
|
||||
--restart=always -d -p ${NGINX_PORT}:80 \
|
||||
--volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \
|
||||
--volume "$(pwd)"/nginx.conf:/etc/nginx/nginx.conf \
|
||||
--name nginx nginx:alpine
|
||||
fi
|
|
@ -1,39 +0,0 @@
|
|||
user nginx;
|
||||
worker_processes auto;
|
||||
error_log /var/log/nginx/error.log;
|
||||
pid /run/nginx.pid;
|
||||
include /usr/share/nginx/modules/*.conf;
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
http {
|
||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||
'$status $body_bytes_sent "$http_referer" '
|
||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
||||
access_log /var/log/nginx/access.log main;
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
default_type application/octet-stream;
|
||||
include /etc/nginx/conf.d/*.conf;
|
||||
server {
|
||||
listen 80 default_server;
|
||||
listen [::]:80 default_server;
|
||||
server_name _;
|
||||
include /etc/nginx/default.d/*.conf;
|
||||
location / {
|
||||
root /usr/share/nginx/html/download;
|
||||
autoindex on;
|
||||
autoindex_exact_size off;
|
||||
autoindex_localtime on;
|
||||
}
|
||||
error_page 404 /404.html;
|
||||
location = /40x.html {
|
||||
}
|
||||
error_page 500 502 503 504 /50x.html;
|
||||
location = /50x.html {
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,2 +0,0 @@
|
|||
---
|
||||
disable_service_firewall: false
|
|
@ -1,23 +0,0 @@
|
|||
---
|
||||
- block:
|
||||
- name: List services
|
||||
service_facts:
|
||||
|
||||
- name: Disable service firewalld
|
||||
systemd:
|
||||
name: firewalld
|
||||
state: stopped
|
||||
enabled: no
|
||||
when:
|
||||
"'firewalld.service' in services"
|
||||
|
||||
- name: Disable service ufw
|
||||
systemd:
|
||||
name: ufw
|
||||
state: stopped
|
||||
enabled: no
|
||||
when:
|
||||
"'ufw.service' in services"
|
||||
|
||||
when:
|
||||
- disable_service_firewall is defined and disable_service_firewall
|
|
@ -9,8 +9,8 @@ Summary: Ansible modules for installing Kubernetes
|
|||
|
||||
Group: System Environment/Libraries
|
||||
License: ASL 2.0
|
||||
Url: https://github.com/kubernetes-sigs/kubespray
|
||||
Source0: https://github.com/kubernetes-sigs/kubespray/archive/%{upstream_version}.tar.gz#/%{name}-%{release}.tar.gz
|
||||
Url: https://github.com/kubernetes-incubator/kubespray
|
||||
Source0: https://github.com/kubernetes-incubator/kubespray/archive/%{upstream_version}.tar.gz#/%{name}-%{release}.tar.gz
|
||||
|
||||
BuildArch: noarch
|
||||
BuildRequires: git
|
||||
|
@ -51,7 +51,7 @@ export SKIP_PIP_INSTALL=1
|
|||
%doc %{_docdir}/%{name}/inventory/sample/hosts.ini
|
||||
%config %{_sysconfdir}/%{name}/ansible.cfg
|
||||
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
|
||||
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s_cluster.yml
|
||||
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
|
||||
%license %{_docdir}/%{name}/LICENSE
|
||||
%{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
|
||||
%{_datarootdir}/%{name}/roles/
|
||||
|
|
1
contrib/terraform/aws/.gitignore
vendored
|
@ -1,3 +1,2 @@
|
|||
*.tfstate*
|
||||
.terraform.lock.hcl
|
||||
.terraform
|
||||
|
|
|
@ -36,7 +36,8 @@ terraform apply -var-file=credentials.tfvars
|
|||
```
|
||||
|
||||
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
|
||||
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts over ssh through the bastion host, use the generated `ssh-bastion.conf`. Ansible automatically detects the bastion and adjusts `ssh_args`
|
||||
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated ssh-bastion.conf.
|
||||
Ansible automatically detects bastion and changes ssh_args
|
||||
|
||||
```commandline
|
||||
ssh -F ./ssh-bastion.conf user@$ip
|
||||
|
@ -121,7 +122,7 @@ You can use the following set of commands to get the kubeconfig file from your n
|
|||
|
||||
```commandline
|
||||
# Get the controller's IP address.
|
||||
CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube_control_plane\]" -A 1 | tail -n 1)
|
||||
CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube-master\]" -A 1 | tail -n 1)
|
||||
CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2)
|
||||
|
||||
# Get the hostname of the load balancer.
|
||||
|
|
|
@ -20,20 +20,20 @@ module "aws-vpc" {
|
|||
|
||||
aws_cluster_name = var.aws_cluster_name
|
||||
aws_vpc_cidr_block = var.aws_vpc_cidr_block
|
||||
aws_avail_zones = data.aws_availability_zones.available.names
|
||||
aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, 2)
|
||||
aws_cidr_subnets_private = var.aws_cidr_subnets_private
|
||||
aws_cidr_subnets_public = var.aws_cidr_subnets_public
|
||||
default_tags = var.default_tags
|
||||
}
|
||||
|
||||
module "aws-nlb" {
|
||||
source = "./modules/nlb"
|
||||
module "aws-elb" {
|
||||
source = "./modules/elb"
|
||||
|
||||
aws_cluster_name = var.aws_cluster_name
|
||||
aws_vpc_id = module.aws-vpc.aws_vpc_id
|
||||
aws_avail_zones = data.aws_availability_zones.available.names
|
||||
aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, 2)
|
||||
aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public
|
||||
aws_nlb_api_port = var.aws_nlb_api_port
|
||||
aws_elb_api_port = var.aws_elb_api_port
|
||||
k8s_secure_api_port = var.k8s_secure_api_port
|
||||
default_tags = var.default_tags
|
||||
}
|
||||
|
@ -52,19 +52,20 @@ module "aws-iam" {
|
|||
resource "aws_instance" "bastion-server" {
|
||||
ami = data.aws_ami.distro.id
|
||||
instance_type = var.aws_bastion_size
|
||||
count = var.aws_bastion_num
|
||||
count = length(var.aws_cidr_subnets_public)
|
||||
associate_public_ip_address = true
|
||||
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
|
||||
subnet_id = element(module.aws-vpc.aws_subnet_ids_public, count.index)
|
||||
|
||||
vpc_security_group_ids = module.aws-vpc.aws_security_group
|
||||
|
||||
key_name = var.AWS_SSH_KEY_NAME
|
||||
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}"
|
||||
Cluster = var.aws_cluster_name
|
||||
Role = "bastion-${var.aws_cluster_name}-${count.index}"
|
||||
}))
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
|
||||
"Cluster", var.aws_cluster_name,
|
||||
"Role", "bastion-${var.aws_cluster_name}-${count.index}"
|
||||
))
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -78,28 +79,25 @@ resource "aws_instance" "k8s-master" {
|
|||
|
||||
count = var.aws_kube_master_num
|
||||
|
||||
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
|
||||
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
|
||||
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
|
||||
|
||||
vpc_security_group_ids = module.aws-vpc.aws_security_group
|
||||
|
||||
root_block_device {
|
||||
volume_size = var.aws_kube_master_disk_size
|
||||
}
|
||||
|
||||
iam_instance_profile = module.aws-iam.kube_control_plane-profile
|
||||
iam_instance_profile = module.aws-iam.kube-master-profile
|
||||
key_name = var.AWS_SSH_KEY_NAME
|
||||
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-master${count.index}"
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
|
||||
Role = "master"
|
||||
}))
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||
"Role", "master"
|
||||
))
|
||||
}
|
||||
|
||||
resource "aws_lb_target_group_attachment" "tg-attach_master_nodes" {
|
||||
count = var.aws_kube_master_num
|
||||
target_group_arn = module.aws-nlb.aws_nlb_api_tg_arn
|
||||
target_id = element(aws_instance.k8s-master.*.private_ip, count.index)
|
||||
resource "aws_elb_attachment" "attach_master_nodes" {
|
||||
count = var.aws_kube_master_num
|
||||
elb = module.aws-elb.aws_elb_api_id
|
||||
instance = element(aws_instance.k8s-master.*.id, count.index)
|
||||
}
|
||||
|
||||
resource "aws_instance" "k8s-etcd" {
|
||||
|
@ -108,21 +106,18 @@ resource "aws_instance" "k8s-etcd" {
|
|||
|
||||
count = var.aws_etcd_num
|
||||
|
||||
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
|
||||
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
|
||||
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
|
||||
|
||||
vpc_security_group_ids = module.aws-vpc.aws_security_group
|
||||
|
||||
root_block_device {
|
||||
volume_size = var.aws_etcd_disk_size
|
||||
}
|
||||
|
||||
key_name = var.AWS_SSH_KEY_NAME
|
||||
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}"
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
|
||||
Role = "etcd"
|
||||
}))
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||
"Role", "etcd"
|
||||
))
|
||||
}
|
||||
|
||||
resource "aws_instance" "k8s-worker" {
|
||||
|
@ -131,22 +126,19 @@ resource "aws_instance" "k8s-worker" {
|
|||
|
||||
count = var.aws_kube_worker_num
|
||||
|
||||
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
|
||||
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
|
||||
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
|
||||
|
||||
vpc_security_group_ids = module.aws-vpc.aws_security_group
|
||||
|
||||
root_block_device {
|
||||
volume_size = var.aws_kube_worker_disk_size
|
||||
}
|
||||
|
||||
iam_instance_profile = module.aws-iam.kube-worker-profile
|
||||
key_name = var.AWS_SSH_KEY_NAME
|
||||
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}"
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
|
||||
Role = "worker"
|
||||
}))
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||
"Role", "worker"
|
||||
))
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -160,11 +152,11 @@ data "template_file" "inventory" {
|
|||
public_ip_address_bastion = join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))
|
||||
connection_strings_master = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip))
|
||||
connection_strings_node = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip))
|
||||
connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))
|
||||
list_master = join("\n", aws_instance.k8s-master.*.private_dns)
|
||||
list_node = join("\n", aws_instance.k8s-worker.*.private_dns)
|
||||
connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))
|
||||
list_etcd = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_dns) : (aws_instance.k8s-master.*.private_dns)))
|
||||
nlb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-nlb.aws_nlb_api_fqdn}\""
|
||||
list_etcd = join("\n", aws_instance.k8s-etcd.*.private_dns)
|
||||
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
|
||||
}
|
||||
}
|
||||
|
||||
|
|
57
contrib/terraform/aws/modules/elb/main.tf
Normal file
|
@ -0,0 +1,57 @@
|
|||
resource "aws_security_group" "aws-elb" {
|
||||
name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
||||
vpc_id = var.aws_vpc_id
|
||||
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
||||
))
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "aws-allow-api-access" {
|
||||
type = "ingress"
|
||||
from_port = var.aws_elb_api_port
|
||||
to_port = var.k8s_secure_api_port
|
||||
protocol = "TCP"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
security_group_id = aws_security_group.aws-elb.id
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "aws-allow-api-egress" {
|
||||
type = "egress"
|
||||
from_port = 0
|
||||
to_port = 65535
|
||||
protocol = "TCP"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
security_group_id = aws_security_group.aws-elb.id
|
||||
}
|
||||
|
||||
# Create a new AWS ELB for K8S API
|
||||
resource "aws_elb" "aws-elb-api" {
|
||||
name = "kubernetes-elb-${var.aws_cluster_name}"
|
||||
subnets = var.aws_subnet_ids_public
|
||||
security_groups = [aws_security_group.aws-elb.id]
|
||||
|
||||
listener {
|
||||
instance_port = var.k8s_secure_api_port
|
||||
instance_protocol = "tcp"
|
||||
lb_port = var.aws_elb_api_port
|
||||
lb_protocol = "tcp"
|
||||
}
|
||||
|
||||
health_check {
|
||||
healthy_threshold = 2
|
||||
unhealthy_threshold = 2
|
||||
timeout = 3
|
||||
target = "HTTPS:${var.k8s_secure_api_port}/healthz"
|
||||
interval = 30
|
||||
}
|
||||
|
||||
cross_zone_load_balancing = true
|
||||
idle_timeout = 400
|
||||
connection_draining = true
|
||||
connection_draining_timeout = 400
|
||||
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-elb-api"
|
||||
))
|
||||
}
|
7
contrib/terraform/aws/modules/elb/outputs.tf
Normal file
|
@ -0,0 +1,7 @@
|
|||
output "aws_elb_api_id" {
|
||||
value = aws_elb.aws-elb-api.id
|
||||
}
|
||||
|
||||
output "aws_elb_api_fqdn" {
|
||||
value = aws_elb.aws-elb-api.dns_name
|
||||
}
|
|
@ -6,8 +6,8 @@ variable "aws_vpc_id" {
|
|||
description = "AWS VPC ID"
|
||||
}
|
||||
|
||||
variable "aws_nlb_api_port" {
|
||||
description = "Port for AWS NLB"
|
||||
variable "aws_elb_api_port" {
|
||||
description = "Port for AWS ELB"
|
||||
}
|
||||
|
||||
variable "k8s_secure_api_port" {
|
|
@ -1,6 +1,6 @@
|
|||
#Add AWS Roles for Kubernetes
|
||||
|
||||
resource "aws_iam_role" "kube_control_plane" {
|
||||
resource "aws_iam_role" "kube-master" {
|
||||
name = "kubernetes-${var.aws_cluster_name}-master"
|
||||
|
||||
assume_role_policy = <<EOF
|
||||
|
@ -40,9 +40,9 @@ EOF
|
|||
|
||||
#Add AWS Policies for Kubernetes
|
||||
|
||||
resource "aws_iam_role_policy" "kube_control_plane" {
|
||||
resource "aws_iam_role_policy" "kube-master" {
|
||||
name = "kubernetes-${var.aws_cluster_name}-master"
|
||||
role = aws_iam_role.kube_control_plane.id
|
||||
role = aws_iam_role.kube-master.id
|
||||
|
||||
policy = <<EOF
|
||||
{
|
||||
|
@ -130,9 +130,9 @@ EOF
|
|||
|
||||
#Create AWS Instance Profiles
|
||||
|
||||
resource "aws_iam_instance_profile" "kube_control_plane" {
|
||||
resource "aws_iam_instance_profile" "kube-master" {
|
||||
name = "kube_${var.aws_cluster_name}_master_profile"
|
||||
role = aws_iam_role.kube_control_plane.name
|
||||
role = aws_iam_role.kube-master.name
|
||||
}
|
||||
|
||||
resource "aws_iam_instance_profile" "kube-worker" {
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
output "kube_control_plane-profile" {
|
||||
value = aws_iam_instance_profile.kube_control_plane.name
|
||||
output "kube-master-profile" {
|
||||
value = aws_iam_instance_profile.kube-master.name
|
||||
}
|
||||
|
||||
output "kube-worker-profile" {
|
||||
|
|
|
@ -1,41 +0,0 @@
|
|||
# Create a new AWS NLB for K8S API
|
||||
resource "aws_lb" "aws-nlb-api" {
|
||||
name = "kubernetes-nlb-${var.aws_cluster_name}"
|
||||
load_balancer_type = "network"
|
||||
subnets = length(var.aws_subnet_ids_public) <= length(var.aws_avail_zones) ? var.aws_subnet_ids_public : slice(var.aws_subnet_ids_public, 0, length(var.aws_avail_zones))
|
||||
idle_timeout = 400
|
||||
enable_cross_zone_load_balancing = true
|
||||
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-nlb-api"
|
||||
}))
|
||||
}
|
||||
|
||||
# Create a new AWS NLB Instance Target Group
|
||||
resource "aws_lb_target_group" "aws-nlb-api-tg" {
|
||||
name = "kubernetes-nlb-tg-${var.aws_cluster_name}"
|
||||
port = var.k8s_secure_api_port
|
||||
protocol = "TCP"
|
||||
target_type = "ip"
|
||||
vpc_id = var.aws_vpc_id
|
||||
|
||||
health_check {
|
||||
healthy_threshold = 2
|
||||
unhealthy_threshold = 2
|
||||
interval = 30
|
||||
protocol = "HTTPS"
|
||||
path = "/healthz"
|
||||
}
|
||||
}
|
||||
|
||||
# Create a new AWS NLB Listener listen to target group
|
||||
resource "aws_lb_listener" "aws-nlb-api-listener" {
|
||||
load_balancer_arn = aws_lb.aws-nlb-api.arn
|
||||
port = var.aws_nlb_api_port
|
||||
protocol = "TCP"
|
||||
|
||||
default_action {
|
||||
type = "forward"
|
||||
target_group_arn = aws_lb_target_group.aws-nlb-api-tg.arn
|
||||
}
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
output "aws_nlb_api_id" {
|
||||
value = aws_lb.aws-nlb-api.id
|
||||
}
|
||||
|
||||
output "aws_nlb_api_fqdn" {
|
||||
value = aws_lb.aws-nlb-api.dns_name
|
||||
}
|
||||
|
||||
output "aws_nlb_api_tg_arn" {
|
||||
value = aws_lb_target_group.aws-nlb-api-tg.arn
|
||||
}
|
|
@ -5,9 +5,9 @@ resource "aws_vpc" "cluster-vpc" {
|
|||
enable_dns_support = true
|
||||
enable_dns_hostnames = true
|
||||
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-vpc"
|
||||
}))
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-vpc"
|
||||
))
|
||||
}
|
||||
|
||||
resource "aws_eip" "cluster-nat-eip" {
|
||||
|
@ -18,22 +18,21 @@ resource "aws_eip" "cluster-nat-eip" {
|
|||
resource "aws_internet_gateway" "cluster-vpc-internetgw" {
|
||||
vpc_id = aws_vpc.cluster-vpc.id
|
||||
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-internetgw"
|
||||
}))
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-internetgw"
|
||||
))
|
||||
}
|
||||
|
||||
resource "aws_subnet" "cluster-vpc-subnets-public" {
|
||||
vpc_id = aws_vpc.cluster-vpc.id
|
||||
count = length(var.aws_cidr_subnets_public)
|
||||
availability_zone = element(var.aws_avail_zones, count.index % length(var.aws_avail_zones))
|
||||
count = length(var.aws_avail_zones)
|
||||
availability_zone = element(var.aws_avail_zones, count.index)
|
||||
cidr_block = element(var.aws_cidr_subnets_public, count.index)
|
||||
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}" = "shared"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}))
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member"
|
||||
))
|
||||
}
|
||||
|
||||
resource "aws_nat_gateway" "cluster-nat-gateway" {
|
||||
|
@ -44,15 +43,13 @@ resource "aws_nat_gateway" "cluster-nat-gateway" {
|
|||
|
||||
resource "aws_subnet" "cluster-vpc-subnets-private" {
|
||||
vpc_id = aws_vpc.cluster-vpc.id
|
||||
count = length(var.aws_cidr_subnets_private)
|
||||
availability_zone = element(var.aws_avail_zones, count.index % length(var.aws_avail_zones))
|
||||
count = length(var.aws_avail_zones)
|
||||
availability_zone = element(var.aws_avail_zones, count.index)
|
||||
cidr_block = element(var.aws_cidr_subnets_private, count.index)
|
||||
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}" = "shared"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}))
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
|
||||
))
|
||||
}
|
||||
|
||||
#Routing in VPC
|
||||
|
@ -67,9 +64,9 @@ resource "aws_route_table" "kubernetes-public" {
|
|||
gateway_id = aws_internet_gateway.cluster-vpc-internetgw.id
|
||||
}
|
||||
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-routetable-public"
|
||||
}))
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
|
||||
))
|
||||
}
|
||||
|
||||
resource "aws_route_table" "kubernetes-private" {
|
||||
|
@ -81,9 +78,9 @@ resource "aws_route_table" "kubernetes-private" {
|
|||
nat_gateway_id = element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)
|
||||
}
|
||||
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
|
||||
}))
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
|
||||
))
|
||||
}
|
||||
|
||||
resource "aws_route_table_association" "kubernetes-public" {
|
||||
|
@ -104,9 +101,9 @@ resource "aws_security_group" "kubernetes" {
|
|||
name = "kubernetes-${var.aws_cluster_name}-securitygroup"
|
||||
vpc_id = aws_vpc.cluster-vpc.id
|
||||
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-securitygroup"
|
||||
}))
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
|
||||
))
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "allow-all-ingress" {
|
||||
|
|
|
@ -11,11 +11,11 @@ output "workers" {
|
|||
}
|
||||
|
||||
output "etcd" {
|
||||
value = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_ip) : (aws_instance.k8s-master.*.private_ip)))
|
||||
value = join("\n", aws_instance.k8s-etcd.*.private_ip)
|
||||
}
|
||||
|
||||
output "aws_nlb_api_fqdn" {
|
||||
value = "${module.aws-nlb.aws_nlb_api_fqdn}:${var.aws_nlb_api_port}"
|
||||
output "aws_elb_api_fqdn" {
|
||||
value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
|
||||
}
|
||||
|
||||
output "inventory" {
|
||||
|
|
|
@ -9,8 +9,6 @@ aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
|
|||
aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]
|
||||
|
||||
#Bastion Host
|
||||
aws_bastion_num = 1
|
||||
|
||||
aws_bastion_size = "t2.medium"
|
||||
|
||||
#Kubernetes Cluster
|
||||
|
@ -19,26 +17,22 @@ aws_kube_master_num = 3
|
|||
|
||||
aws_kube_master_size = "t2.medium"
|
||||
|
||||
aws_kube_master_disk_size = 50
|
||||
|
||||
aws_etcd_num = 3
|
||||
|
||||
aws_etcd_size = "t2.medium"
|
||||
|
||||
aws_etcd_disk_size = 50
|
||||
|
||||
aws_kube_worker_num = 4
|
||||
|
||||
aws_kube_worker_size = "t2.medium"
|
||||
|
||||
aws_kube_worker_disk_size = 50
|
||||
#Settings AWS ELB
|
||||
|
||||
#Settings AWS NLB
|
||||
|
||||
aws_nlb_api_port = 6443
|
||||
aws_elb_api_port = 6443
|
||||
|
||||
k8s_secure_api_port = 6443
|
||||
|
||||
kube_insecure_apiserver_address = "0.0.0.0"
|
||||
|
||||
default_tags = {
|
||||
# Env = "devtest" # Product = "kubernetes"
|
||||
}
|
||||
|
|
|
@ -7,21 +7,22 @@ ${public_ip_address_bastion}
|
|||
[bastion]
|
||||
${public_ip_address_bastion}
|
||||
|
||||
[kube_control_plane]
|
||||
[kube-master]
|
||||
${list_master}
|
||||
|
||||
[kube_node]
|
||||
|
||||
[kube-node]
|
||||
${list_node}
|
||||
|
||||
|
||||
[etcd]
|
||||
${list_etcd}
|
||||
|
||||
[calico_rr]
|
||||
|
||||
[k8s_cluster:children]
|
||||
kube_node
|
||||
kube_control_plane
|
||||
calico_rr
|
||||
[k8s-cluster:children]
|
||||
kube-node
|
||||
kube-master
|
||||
|
||||
[k8s_cluster:vars]
|
||||
${nlb_api_fqdn}
|
||||
|
||||
[k8s-cluster:vars]
|
||||
${elb_api_fqdn}
|
||||
|
|
|
@ -6,34 +6,26 @@ aws_vpc_cidr_block = "10.250.192.0/18"
|
|||
aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
|
||||
aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]
|
||||
|
||||
# single AZ deployment
|
||||
#aws_cidr_subnets_private = ["10.250.192.0/20"]
|
||||
#aws_cidr_subnets_public = ["10.250.224.0/20"]
|
||||
|
||||
# 3+ AZ deployment
|
||||
#aws_cidr_subnets_private = ["10.250.192.0/24","10.250.193.0/24","10.250.194.0/24","10.250.195.0/24"]
|
||||
#aws_cidr_subnets_public = ["10.250.224.0/24","10.250.225.0/24","10.250.226.0/24","10.250.227.0/24"]
|
||||
|
||||
#Bastion Host
|
||||
aws_bastion_num = 1
|
||||
aws_bastion_size = "t3.small"
|
||||
aws_bastion_size = "t2.medium"
|
||||
|
||||
|
||||
#Kubernetes Cluster
|
||||
aws_kube_master_num = 3
|
||||
aws_kube_master_size = "t3.medium"
|
||||
aws_kube_master_disk_size = 50
|
||||
|
||||
aws_etcd_num = 0
|
||||
aws_etcd_size = "t3.medium"
|
||||
aws_etcd_disk_size = 50
|
||||
aws_kube_master_num = 3
|
||||
aws_kube_master_size = "t2.medium"
|
||||
|
||||
aws_kube_worker_num = 4
|
||||
aws_kube_worker_size = "t3.medium"
|
||||
aws_kube_worker_disk_size = 50
|
||||
aws_etcd_num = 3
|
||||
aws_etcd_size = "t2.medium"
|
||||
|
||||
aws_kube_worker_num = 4
|
||||
aws_kube_worker_size = "t2.medium"
|
||||
|
||||
#Settings AWS ELB
|
||||
aws_nlb_api_port = 6443
|
||||
k8s_secure_api_port = 6443
|
||||
|
||||
aws_elb_api_port = 6443
|
||||
k8s_secure_api_port = 6443
|
||||
kube_insecure_apiserver_address = "0.0.0.0"
|
||||
|
||||
default_tags = {
|
||||
# Env = "devtest"
|
||||
|
|
|
@ -8,26 +8,25 @@ aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
|
|||
aws_avail_zones = ["eu-central-1a","eu-central-1b"]
|
||||
|
||||
#Bastion Host
|
||||
aws_bastion_num = 1
|
||||
aws_bastion_size = "t3.small"
|
||||
aws_bastion_ami = "ami-5900cc36"
|
||||
aws_bastion_size = "t2.small"
|
||||
|
||||
|
||||
#Kubernetes Cluster
|
||||
|
||||
aws_kube_master_num = 3
|
||||
aws_kube_master_size = "t3.medium"
|
||||
aws_kube_master_disk_size = 50
|
||||
aws_kube_master_size = "t2.medium"
|
||||
|
||||
aws_etcd_num = 3
|
||||
aws_etcd_size = "t3.medium"
|
||||
aws_etcd_disk_size = 50
|
||||
aws_etcd_size = "t2.medium"
|
||||
|
||||
aws_kube_worker_num = 4
|
||||
aws_kube_worker_size = "t3.medium"
|
||||
aws_kube_worker_disk_size = 50
|
||||
aws_kube_worker_size = "t2.medium"
|
||||
|
||||
aws_cluster_ami = "ami-903df7ff"
|
||||
|
||||
#Settings AWS ELB
|
||||
aws_nlb_api_port = 6443
|
||||
|
||||
aws_elb_api_port = 6443
|
||||
k8s_secure_api_port = 6443
|
||||
|
||||
default_tags = { }
|
||||
|
||||
inventory_file = "../../../inventory/hosts"
|
||||
kube_insecure_apiserver_address = 0.0.0.0
|
||||
|
|
|
@ -25,7 +25,7 @@ data "aws_ami" "distro" {
|
|||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["debian-10-amd64-*"]
|
||||
values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"]
|
||||
}
|
||||
|
||||
filter {
|
||||
|
@ -33,7 +33,7 @@ data "aws_ami" "distro" {
|
|||
values = ["hvm"]
|
||||
}
|
||||
|
||||
owners = ["136693071363"] # Debian-10
|
||||
owners = ["099720109477"] # Canonical
|
||||
}
|
||||
|
||||
//AWS VPC Variables
|
||||
|
@ -63,18 +63,10 @@ variable "aws_bastion_size" {
|
|||
* The number should be divisible by the number of used
* AWS Availability Zones without a remainder.
|
||||
*/
|
||||
variable "aws_bastion_num" {
|
||||
description = "Number of Bastion Nodes"
|
||||
}
|
||||
|
||||
variable "aws_kube_master_num" {
|
||||
description = "Number of Kubernetes Master Nodes"
|
||||
}
|
||||
|
||||
variable "aws_kube_master_disk_size" {
|
||||
description = "Disk size for Kubernetes Master Nodes (in GiB)"
|
||||
}
|
||||
|
||||
variable "aws_kube_master_size" {
|
||||
description = "Instance size of Kube Master Nodes"
|
||||
}
|
||||
|
@ -83,10 +75,6 @@ variable "aws_etcd_num" {
|
|||
description = "Number of etcd Nodes"
|
||||
}
|
||||
|
||||
variable "aws_etcd_disk_size" {
|
||||
description = "Disk size for etcd Nodes (in GiB)"
|
||||
}
|
||||
|
||||
variable "aws_etcd_size" {
|
||||
description = "Instance size of etcd Nodes"
|
||||
}
|
||||
|
@ -95,20 +83,16 @@ variable "aws_kube_worker_num" {
|
|||
description = "Number of Kubernetes Worker Nodes"
|
||||
}
|
||||
|
||||
variable "aws_kube_worker_disk_size" {
|
||||
description = "Disk size for Kubernetes Worker Nodes (in GiB)"
|
||||
}
|
||||
|
||||
variable "aws_kube_worker_size" {
|
||||
description = "Instance size of Kubernetes Worker Nodes"
|
||||
}
|
||||
|
||||
/*
|
||||
* AWS NLB Settings
|
||||
* AWS ELB Settings
|
||||
*
|
||||
*/
|
||||
variable "aws_nlb_api_port" {
|
||||
description = "Port for AWS NLB"
|
||||
variable "aws_elb_api_port" {
|
||||
description = "Port for AWS ELB"
|
||||
}
|
||||
|
||||
variable "k8s_secure_api_port" {
|
||||
|
|
|
@ -1,152 +0,0 @@
|
|||
# Kubernetes on Exoscale with Terraform
|
||||
|
||||
Provision a Kubernetes cluster on [Exoscale](https://www.exoscale.com/) using Terraform and Kubespray
|
||||
|
||||
## Overview
|
||||
|
||||
The setup looks like the following:
|
||||
|
||||
```text
|
||||
Kubernetes cluster
|
||||
+-----------------------+
|
||||
+---------------+ | +--------------+ |
|
||||
| | | | +--------------+ |
|
||||
| API server LB +---------> | | | |
|
||||
| | | | | Master/etcd | |
|
||||
+---------------+ | | | node(s) | |
|
||||
| +-+ | |
|
||||
| +--------------+ |
|
||||
| ^ |
|
||||
| | |
|
||||
| v |
|
||||
+---------------+ | +--------------+ |
|
||||
| | | | +--------------+ |
|
||||
| Ingress LB +---------> | | | |
|
||||
| | | | | Worker | |
|
||||
+---------------+ | | | node(s) | |
|
||||
| +-+ | |
|
||||
| +--------------+ |
|
||||
+-----------------------+
|
||||
```
|
||||
|
||||
## Requirements
|
||||
|
||||
* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)
|
||||
|
||||
## Quickstart
|
||||
|
||||
NOTE: *Assumes you are at the root of the kubespray repo*
|
||||
|
||||
Copy the sample inventory for your cluster and copy the default terraform variables.
|
||||
|
||||
```bash
|
||||
CLUSTER=my-exoscale-cluster
|
||||
cp -r inventory/sample inventory/$CLUSTER
|
||||
cp contrib/terraform/exoscale/default.tfvars inventory/$CLUSTER/
|
||||
cd inventory/$CLUSTER
|
||||
```
|
||||
|
||||
Edit `default.tfvars` to match your setup. You MUST, at the very least, change `ssh_public_keys`.
|
||||
|
||||
```bash
|
||||
# Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc.
|
||||
$EDITOR default.tfvars
|
||||
```
|
||||
|
||||
For authentication you can use the credentials file `~/.cloudstack.ini` or `./cloudstack.ini`.
|
||||
The file should look something like this:
|
||||
|
||||
```ini
|
||||
[cloudstack]
|
||||
key = <API key>
|
||||
secret = <API secret>
|
||||
```
|
||||
|
||||
Follow the [Exoscale IAM Quick-start](https://community.exoscale.com/documentation/iam/quick-start/) to learn how to generate API keys.
|
||||
|
||||
### Encrypted credentials
|
||||
|
||||
To have the credentials encrypted at rest, you can use [sops](https://github.com/mozilla/sops) and only decrypt the credentials at runtime.
|
||||
|
||||
```bash
|
||||
cat << EOF > cloudstack.ini
|
||||
[cloudstack]
|
||||
key =
|
||||
secret =
|
||||
EOF
|
||||
sops --encrypt --in-place --pgp <PGP key fingerprint> cloudstack.ini
|
||||
sops cloudstack.ini
|
||||
```
|
||||
|
||||
Run terraform to create the infrastructure
|
||||
|
||||
```bash
|
||||
terraform init ../../contrib/terraform/exoscale
|
||||
terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale
|
||||
```
|
||||
|
||||
If your cloudstack credentials file is encrypted using sops, run the following:
|
||||
|
||||
```bash
|
||||
terraform init ../../contrib/terraform/exoscale
|
||||
sops exec-file -no-fifo cloudstack.ini 'CLOUDSTACK_CONFIG={} terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale'
|
||||
```
|
||||
|
||||
You should now have an inventory file named `inventory.ini` that you can use with kubespray.
|
||||
You can now copy your inventory file and use it with kubespray to set up a cluster.
|
||||
You can type `terraform output` to find out the IP addresses of the nodes, as well as the control-plane and data-plane load-balancers.
|
||||
|
||||
It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by:
|
||||
|
||||
```bash
|
||||
ansible -i inventory.ini -m ping all
|
||||
```
|
||||
|
||||
Example to use this with the default sample inventory:
|
||||
|
||||
```bash
|
||||
ansible-playbook -i inventory.ini ../../cluster.yml -b -v
|
||||
```
|
||||
|
||||
## Teardown
|
||||
|
||||
Since the Kubernetes cluster cannot create any load balancers or disks, teardown is as simple as a Terraform destroy:
|
||||
|
||||
```bash
|
||||
terraform destroy -var-file default.tfvars ../../contrib/terraform/exoscale
|
||||
```

## Variables

### Required

* `ssh_public_keys`: List of public SSH keys to install on all machines
* `zone`: The zone where to run the cluster
* `machines`: Machines to provision. The key of each entry will be used as the name of the machine
  * `node_type`: The role of this node *(master|worker)*
  * `size`: The size to use
  * `boot_disk`: The boot disk to use
    * `image_name`: Name of the image
    * `root_partition_size`: Size *(in GB)* for the root partition
    * `ceph_partition_size`: Size *(in GB)* for the partition for Rook to use as Ceph storage *(set to 0 to disable)*
    * `node_local_partition_size`: Size *(in GB)* for the partition for node-local storage *(set to 0 to disable)*
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to SSH to the nodes
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the Kubernetes nodes on ports 30000-32767 (Kubernetes NodePorts)

### Optional

* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(defaults to `default`)*

An example variables file can be found in `default.tfvars`.

## Known limitations

### Only single disk

Since Exoscale doesn't support attaching additional disks to an instance, this script can instead create dedicated partitions on the boot disk for [Rook](https://rook.io/) and [node-local-storage](https://kubernetes.io/docs/concepts/storage/volumes/#local).
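
For example, when `node_local_partition_size` is greater than 0, the cloud-init template further down partitions the boot disk (`/dev/vda`) and mounts the extra partition at `/mnt/disks/node-local-storage`. A quick way to verify this on a node (a sketch, assuming the default Ubuntu image) is:

```bash
# Show the partitions that cloud-init created on the boot disk
lsblk /dev/vda
# Confirm the node-local-storage partition is mounted (only when node_local_partition_size > 0)
findmnt /mnt/disks/node-local-storage
```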

### No Kubernetes API

The current solution doesn't use the [Exoscale Kubernetes cloud controller](https://github.com/exoscale/exoscale-cloud-controller-manager).
This means that we need to set up an HTTP(S) load balancer in front of all workers and set the Ingress controller to DaemonSet mode.
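
Once an ingress controller is deployed on the workers, the ingress elastic IP should start answering on port 80 (this is also the port the EIP health check probes). A rough sketch of checking it, assuming Terraform 0.14+ for `-raw` and that your ingress controller serves plain HTTP:

```bash
# Expect an HTTP response (often 404 from the default backend) once the ingress controller is up
curl -i "http://$(terraform output -raw ingress_controller_lb_ip_address)/"
```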

@@ -1,65 +0,0 @@
prefix = "default"
zone   = "ch-gva-2"

inventory_file = "inventory.ini"

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa I-did-not-read-the-docs",
  "ssh-rsa I-did-not-read-the-docs 2",
]

machines = {
  "master-0" : {
    "node_type" : "master",
    "size" : "Medium",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-0" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-1" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-2" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  }
}

nodeport_whitelist = [
  "0.0.0.0/0"
]

ssh_whitelist = [
  "0.0.0.0/0"
]

api_server_whitelist = [
  "0.0.0.0/0"
]

@@ -1,49 +0,0 @@
provider "exoscale" {}

module "kubernetes" {
  source = "./modules/kubernetes-cluster"

  prefix   = var.prefix
  zone     = var.zone
  machines = var.machines

  ssh_public_keys = var.ssh_public_keys

  ssh_whitelist        = var.ssh_whitelist
  api_server_whitelist = var.api_server_whitelist
  nodeport_whitelist   = var.nodeport_whitelist
}

#
# Generate ansible inventory
#

data "template_file" "inventory" {
  template = file("${path.module}/templates/inventory.tpl")

  vars = {
    connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d",
      keys(module.kubernetes.master_ip_addresses),
      values(module.kubernetes.master_ip_addresses).*.public_ip,
      values(module.kubernetes.master_ip_addresses).*.private_ip,
      range(1, length(module.kubernetes.master_ip_addresses) + 1)))
    connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s",
      keys(module.kubernetes.worker_ip_addresses),
      values(module.kubernetes.worker_ip_addresses).*.public_ip,
      values(module.kubernetes.worker_ip_addresses).*.private_ip))

    list_master       = join("\n", keys(module.kubernetes.master_ip_addresses))
    list_worker       = join("\n", keys(module.kubernetes.worker_ip_addresses))
    api_lb_ip_address = module.kubernetes.control_plane_lb_ip_address
  }
}

resource "null_resource" "inventories" {
  provisioner "local-exec" {
    command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
  }

  triggers = {
    template = data.template_file.inventory.rendered
  }
}

@@ -1,193 +0,0 @@
data "exoscale_compute_template" "os_image" {
  for_each = var.machines

  zone = var.zone
  name = each.value.boot_disk.image_name
}

data "exoscale_compute" "master_nodes" {
  for_each = exoscale_compute.master

  id = each.value.id

  # Since private IP address is not assigned until the nics are created we need this
  depends_on = [exoscale_nic.master_private_network_nic]
}

data "exoscale_compute" "worker_nodes" {
  for_each = exoscale_compute.worker

  id = each.value.id

  # Since private IP address is not assigned until the nics are created we need this
  depends_on = [exoscale_nic.worker_private_network_nic]
}

resource "exoscale_network" "private_network" {
  zone = var.zone
  name = "${var.prefix}-network"

  start_ip = cidrhost(var.private_network_cidr, 1)
  # cidr -1 = Broadcast address
  # cidr -2 = DHCP server address (exoscale specific)
  end_ip  = cidrhost(var.private_network_cidr, -3)
  netmask = cidrnetmask(var.private_network_cidr)
}

resource "exoscale_compute" "master" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "master"
  }

  display_name    = "${var.prefix}-${each.key}"
  template_id     = data.exoscale_compute_template.os_image[each.key].id
  size            = each.value.size
  disk_size       = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
  state           = "Running"
  zone            = var.zone
  security_groups = [exoscale_security_group.master_sg.name]

  user_data = templatefile(
    "${path.module}/templates/cloud-init.tmpl",
    {
      eip_ip_address            = exoscale_ipaddress.ingress_controller_lb.ip_address
      node_local_partition_size = each.value.boot_disk.node_local_partition_size
      ceph_partition_size       = each.value.boot_disk.ceph_partition_size
      root_partition_size       = each.value.boot_disk.root_partition_size
      node_type                 = "master"
      ssh_public_keys           = var.ssh_public_keys
    }
  )
}

resource "exoscale_compute" "worker" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "worker"
  }

  display_name    = "${var.prefix}-${each.key}"
  template_id     = data.exoscale_compute_template.os_image[each.key].id
  size            = each.value.size
  disk_size       = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
  state           = "Running"
  zone            = var.zone
  security_groups = [exoscale_security_group.worker_sg.name]

  user_data = templatefile(
    "${path.module}/templates/cloud-init.tmpl",
    {
      eip_ip_address            = exoscale_ipaddress.ingress_controller_lb.ip_address
      node_local_partition_size = each.value.boot_disk.node_local_partition_size
      ceph_partition_size       = each.value.boot_disk.ceph_partition_size
      root_partition_size       = each.value.boot_disk.root_partition_size
      node_type                 = "worker"
      ssh_public_keys           = var.ssh_public_keys
    }
  )
}

resource "exoscale_nic" "master_private_network_nic" {
  for_each = exoscale_compute.master

  compute_id = each.value.id
  network_id = exoscale_network.private_network.id
}

resource "exoscale_nic" "worker_private_network_nic" {
  for_each = exoscale_compute.worker

  compute_id = each.value.id
  network_id = exoscale_network.private_network.id
}

resource "exoscale_security_group" "master_sg" {
  name        = "${var.prefix}-master-sg"
  description = "Security group for Kubernetes masters"
}

resource "exoscale_security_group_rules" "master_sg_rules" {
  security_group_id = exoscale_security_group.master_sg.id

  # SSH
  ingress {
    protocol  = "TCP"
    cidr_list = var.ssh_whitelist
    ports     = ["22"]
  }

  # Kubernetes API
  ingress {
    protocol  = "TCP"
    cidr_list = var.api_server_whitelist
    ports     = ["6443"]
  }
}

resource "exoscale_security_group" "worker_sg" {
  name        = "${var.prefix}-worker-sg"
  description = "security group for kubernetes worker nodes"
}

resource "exoscale_security_group_rules" "worker_sg_rules" {
  security_group_id = exoscale_security_group.worker_sg.id

  # SSH
  ingress {
    protocol  = "TCP"
    cidr_list = var.ssh_whitelist
    ports     = ["22"]
  }

  # HTTP(S)
  ingress {
    protocol  = "TCP"
    cidr_list = ["0.0.0.0/0"]
    ports     = ["80", "443"]
  }

  # Kubernetes Nodeport
  ingress {
    protocol  = "TCP"
    cidr_list = var.nodeport_whitelist
    ports     = ["30000-32767"]
  }
}

resource "exoscale_ipaddress" "ingress_controller_lb" {
  zone                     = var.zone
  healthcheck_mode         = "http"
  healthcheck_port         = 80
  healthcheck_path         = "/healthz"
  healthcheck_interval     = 10
  healthcheck_timeout      = 2
  healthcheck_strikes_ok   = 2
  healthcheck_strikes_fail = 3
}

resource "exoscale_secondary_ipaddress" "ingress_controller_lb" {
  for_each = exoscale_compute.worker

  compute_id = each.value.id
  ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
}

resource "exoscale_ipaddress" "control_plane_lb" {
  zone                     = var.zone
  healthcheck_mode         = "tcp"
  healthcheck_port         = 6443
  healthcheck_interval     = 10
  healthcheck_timeout      = 2
  healthcheck_strikes_ok   = 2
  healthcheck_strikes_fail = 3
}

resource "exoscale_secondary_ipaddress" "control_plane_lb" {
  for_each = exoscale_compute.master

  compute_id = each.value.id
  ip_address = exoscale_ipaddress.control_plane_lb.ip_address
}

@@ -1,31 +0,0 @@
output "master_ip_addresses" {
  value = {
    for key, instance in exoscale_compute.master :
    instance.name => {
      "private_ip" = contains(keys(data.exoscale_compute.master_nodes), key) ? data.exoscale_compute.master_nodes[key].private_network_ip_addresses[0] : ""
      "public_ip"  = exoscale_compute.master[key].ip_address
    }
  }
}

output "worker_ip_addresses" {
  value = {
    for key, instance in exoscale_compute.worker :
    instance.name => {
      "private_ip" = contains(keys(data.exoscale_compute.worker_nodes), key) ? data.exoscale_compute.worker_nodes[key].private_network_ip_addresses[0] : ""
      "public_ip"  = exoscale_compute.worker[key].ip_address
    }
  }
}

output "cluster_private_network_cidr" {
  value = var.private_network_cidr
}

output "ingress_controller_lb_ip_address" {
  value = exoscale_ipaddress.ingress_controller_lb.ip_address
}

output "control_plane_lb_ip_address" {
  value = exoscale_ipaddress.control_plane_lb.ip_address
}

@@ -1,52 +0,0 @@
#cloud-config
%{ if ceph_partition_size > 0 || node_local_partition_size > 0}
bootcmd:
  - [ cloud-init-per, once, move-second-header, sgdisk, --move-second-header, /dev/vda ]
%{ if node_local_partition_size > 0 }
  # Create partition for node local storage
  - [ cloud-init-per, once, create-node-local-part, parted, --script, /dev/vda, 'mkpart extended ext4 ${root_partition_size}GB %{ if ceph_partition_size == 0 }-1%{ else }${root_partition_size + node_local_partition_size}GB%{ endif }' ]
  - [ cloud-init-per, once, create-fs-node-local-part, mkfs.ext4, /dev/vda2 ]
%{ endif }
%{ if ceph_partition_size > 0 }
  # Create partition for rook to use for ceph
  - [ cloud-init-per, once, create-ceph-part, parted, --script, /dev/vda, 'mkpart extended ${root_partition_size + node_local_partition_size}GB -1' ]
%{ endif }
%{ endif }

ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
  - ${ssh_public_key}
%{ endfor ~}

write_files:
  - path: /etc/netplan/eth1.yaml
    content: |
      network:
        version: 2
        ethernets:
          eth1:
            dhcp4: true
%{ if node_type == "worker" }
  # TODO: When a VM is seen as healthy and is added to the EIP loadbalancer
  # pool it no longer can send traffic back to itself via the EIP IP
  # address.
  # Remove this if it ever gets solved.
  - path: /etc/netplan/20-eip-fix.yaml
    content: |
      network:
        version: 2
        ethernets:
          "lo:0":
            match:
              name: lo
            dhcp4: false
            addresses:
              - ${eip_ip_address}/32
%{ endif }
runcmd:
  - netplan apply
%{ if node_local_partition_size > 0 }
  - mkdir -p /mnt/disks/node-local-storage
  - chown nobody:nogroup /mnt/disks/node-local-storage
  - mount /dev/vda2 /mnt/disks/node-local-storage
%{ endif }

@@ -1,42 +0,0 @@
variable "zone" {
  type = string
  # This is currently the only zone that is supposed to support
  # so-called "managed private networks".
  # See: https://www.exoscale.com/syslog/introducing-managed-private-networks
  default = "ch-gva-2"
}

variable "prefix" {}

variable "machines" {
  type = map(object({
    node_type = string
    size      = string
    boot_disk = object({
      image_name                = string
      root_partition_size      = number
      ceph_partition_size       = number
      node_local_partition_size = number
    })
  }))
}

variable "ssh_public_keys" {
  type = list(string)
}

variable "ssh_whitelist" {
  type = list(string)
}

variable "api_server_whitelist" {
  type = list(string)
}

variable "nodeport_whitelist" {
  type = list(string)
}

variable "private_network_cidr" {
  default = "172.0.10.0/24"
}

@@ -1,9 +0,0 @@
terraform {
  required_providers {
    exoscale = {
      source  = "exoscale/exoscale"
      version = ">= 0.21"
    }
  }
  required_version = ">= 0.13"
}

@@ -1,15 +0,0 @@
output "master_ips" {
  value = module.kubernetes.master_ip_addresses
}

output "worker_ips" {
  value = module.kubernetes.worker_ip_addresses
}

output "ingress_controller_lb_ip_address" {
  value = module.kubernetes.ingress_controller_lb_ip_address
}

output "control_plane_lb_ip_address" {
  value = module.kubernetes.control_plane_lb_ip_address
}

@@ -1,65 +0,0 @@
prefix = "default"
zone   = "ch-gva-2"

inventory_file = "inventory.ini"

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa I-did-not-read-the-docs",
  "ssh-rsa I-did-not-read-the-docs 2",
]

machines = {
  "master-0" : {
    "node_type" : "master",
    "size" : "Small",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-0" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-1" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-2" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  }
}

nodeport_whitelist = [
  "0.0.0.0/0"
]

ssh_whitelist = [
  "0.0.0.0/0"
]

api_server_whitelist = [
  "0.0.0.0/0"
]

@@ -1,19 +0,0 @@
[all]
${connection_strings_master}
${connection_strings_worker}

[kube_control_plane]
${list_master}

[kube_control_plane:vars]
supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ]

[etcd]
${list_master}

[kube_node]
${list_worker}

[k8s_cluster:children]
kube_control_plane
kube_node

@@ -1,46 +0,0 @@
variable "zone" {
  description = "The zone where to run the cluster"
}

variable "prefix" {
  description = "Prefix for resource names"
  default     = "default"
}

variable "machines" {
  description = "Cluster machines"
  type = map(object({
    node_type = string
    size      = string
    boot_disk = object({
      image_name                = string
      root_partition_size      = number
      ceph_partition_size       = number
      node_local_partition_size = number
    })
  }))
}

variable "ssh_public_keys" {
  description = "List of public SSH keys which are injected into the VMs."
  type        = list(string)
}

variable "ssh_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for ssh"
  type        = list(string)
}

variable "api_server_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for kubernetes api server"
  type        = list(string)
}

variable "nodeport_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports"
  type        = list(string)
}

variable "inventory_file" {
  description = "Where to store the generated inventory file"
}

@@ -1,15 +0,0 @@
terraform {
  required_providers {
    exoscale = {
      source  = "exoscale/exoscale"
      version = ">= 0.21"
    }
    null = {
      source = "hashicorp/null"
    }
    template = {
      source = "hashicorp/template"
    }
  }
  required_version = ">= 0.13"
}