Compare commits

..

10 commits

Author SHA1 Message Date
Florian Ruynat
75d648cae5
Fix unintended SIGPIPE (#6817) 2020-10-21 03:24:20 -07:00
bozzo
087d9c204f
Fix cinder & external_openstack cacert deployment (#6745) (#6832)
The CA cert was only deployed on master nodes
2020-10-21 01:48:20 -07:00
Florian Ruynat
775cadda62
Update hashes and set default to 1.18.10 (#6842) 2020-10-21 01:30:23 -07:00
Kenichi Omichi
19c000c127
Set ansible_python_interpreter to python3 on debian (fix error with mitogen) (#6633) (#6744)
Co-authored-by: Florian Ruynat <16313165+floryut@users.noreply.github.com>
2020-10-01 06:16:54 -07:00
lukasz bielinski
b39a196cfb
properly generate extravolumes in kubeadmconfig for centos (#6707) 2020-09-23 01:42:08 -07:00
Florent Monbillard
9fc14b3e6c
Make sure node_ip is set if node is in etcd group (#6720) 2020-09-23 00:46:08 -07:00
Florian Ruynat
f9a7dce7ca
Add Kubernetes hashes 1.19.2/1.18.9/1.17.12 and set default (#6699) 2020-09-18 14:44:28 -07:00
Florian Ruynat
fbbbd90732
fix kubelet_flexvolumes_plugins_dir undefined (#6645) (#6670)
Co-authored-by: w33dw0r7d <w33dw0r7d@gmail.com>
2020-09-18 02:14:46 -07:00
Florian Ruynat
9869b46432
Move from widehat.opensuse to download.opensuse for crio centos (#6682) (#6704) 2020-09-18 02:04:45 -07:00
spaced
6cd33700f5
NetworkManager lists must be separated by , (#6649) 2020-09-11 00:30:15 -07:00
1254 changed files with 37073 additions and 41609 deletions

View file

@@ -18,13 +18,3 @@ skip_list:
   # While it can be useful to have these metadata available, they are also available in the existing documentation.
   # (Disabled in May 2019)
   - '701'
-
-  # [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
-  # Meta roles in Kubespray don't need proper names
-  # (Disabled in June 2021)
-  - 'role-name'
-
-  # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
-  # In Kubespray we use variables that use camelCase to match their k8s counterparts
-  # (Disabled in June 2021)
-  - 'var-naming'

View file

@@ -1,7 +1,7 @@
 ---
 name: Support Request
 about: Support request or question relating to Kubespray
-labels: kind/support
+labels: triage/support
 ---

17
.gitignore vendored
View file

@@ -3,10 +3,7 @@
 **/vagrant_ansible_inventory
 *.iml
 temp
-contrib/offline/offline-files
-contrib/offline/offline-files.tar.gz
 .idea
-.vscode
 .tox
 .cache
 *.bak
@@ -14,19 +11,15 @@ contrib/offline/offline-files.tar.gz
 *.tfstate.backup
 .terraform/
 contrib/terraform/aws/credentials.tfvars
-.terraform.lock.hcl
 /ssh-bastion.conf
 **/*.sw[pon]
 *~
 vagrant/
-plugins/mitogen
-deploy.sh

 # Ansible inventory
 inventory/*
 !inventory/local
 !inventory/sample
-!inventory/c12s-sample
 inventory/*/artifacts/

 # Byte-compiled / optimized / DLL files
@@ -105,13 +98,3 @@ target/
 # virtualenv
 venv/
 ENV/
-
-# molecule
-roles/**/molecule/**/__pycache__/
-
-# macOS
-.DS_Store
-
-# Temp location used by our scripts
-scripts/tmp/
-tmp.md

View file

@@ -8,15 +8,13 @@ stages:
   - deploy-special

 variables:
-  KUBESPRAY_VERSION: v2.20.0
+  KUBESPRAY_VERSION: v2.13.3
   FAILFASTCI_NAMESPACE: 'kargo-ci'
   GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
   ANSIBLE_FORCE_COLOR: "true"
   MAGIC: "ci check this"
   TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
   CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
-  CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
-  CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
   GS_ACCESS_KEY_ID: $GS_KEY
   GS_SECRET_ACCESS_KEY: $GS_SECRET
   CONTAINER_ENGINE: docker
@@ -27,20 +25,16 @@ variables:
   ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
   IDEMPOT_CHECK: "false"
   RESET_CHECK: "false"
-  REMOVE_NODE_CHECK: "false"
   UPGRADE_TEST: "false"
   MITOGEN_ENABLE: "false"
   ANSIBLE_LOG_LEVEL: "-vv"
   RECOVER_CONTROL_PLANE_TEST: "false"
-  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
+  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
-  TERRAFORM_VERSION: 1.0.8
-  ANSIBLE_MAJOR_VERSION: "2.11"

 before_script:
   - ./tests/scripts/rebase.sh
   - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
-  - python -m pip uninstall -y ansible ansible-base ansible-core
-  - python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
+  - python -m pip install -r tests/requirements.txt
   - mkdir -p /.ssh

 .job: &job
@@ -54,7 +48,6 @@ before_script:
 .testcases: &testcases
   <<: *job
-  retry: 1
   before_script:
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
     - ./tests/scripts/rebase.sh
@@ -81,4 +74,3 @@ include:
   - .gitlab-ci/terraform.yml
   - .gitlab-ci/packet.yml
   - .gitlab-ci/vagrant.yml
-  - .gitlab-ci/molecule.yml
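The `RECOVER_CONTROL_PLANE_TEST_GROUPS` values changed above (`kube_control_plane[1:]` vs `kube-master[1:]`) are Ansible host patterns: `group[N:]` selects group members from zero-based index N onward. A minimal sketch with a hypothetical three-node inventory (the variable itself is consumed by the test scripts, presumably as a `--limit`-style pattern):

```ShellSession
# hosts.ini is a made-up inventory for illustration only
cat > hosts.ini <<'EOF'
[etcd]
node1
node2
node3
EOF
# etcd[2:] selects members from index 2 onward, i.e. node3
ansible -i hosts.ini 'etcd[2:]' --list-hosts
```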

View file

@@ -14,7 +14,7 @@ vagrant-validate:
   stage: unit-tests
   tags: [light]
   variables:
-    VAGRANT_VERSION: 2.2.19
+    VAGRANT_VERSION: 2.2.4
   script:
     - ./tests/scripts/vagrant-validate.sh
   except: ['triggers', 'master']

@@ -23,8 +23,9 @@ ansible-lint:
   extends: .job
   stage: unit-tests
   tags: [light]
-  script:
-    - ansible-lint -v
+  # lint every yml/yaml file that looks like it contains Ansible plays
+  script: |-
+    grep -Rl '^- hosts: \|^  hosts: ' --include \*.yml --include \*.yaml . | xargs -P 4 -n 25 ansible-lint -v
   except: ['triggers', 'master']
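For context, the new `ansible-lint` job replaces a bare `ansible-lint -v` with a shell pipeline; a commented sketch of what each stage does, using the same flags as the diff above:

```ShellSession
# grep -R: search the tree recursively; -l: print only matching file names;
# the pattern matches files with a play header ("- hosts: " at column 0,
# or an indented "  hosts: ") in .yml/.yaml files.
grep -Rl '^- hosts: \|^  hosts: ' --include \*.yml --include \*.yaml . \
  | xargs -P 4 -n 25 ansible-lint -v
# xargs -P 4: run up to four ansible-lint processes in parallel;
# -n 25: pass at most 25 files to each invocation.
```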
 syntax-check:
@@ -52,7 +53,6 @@ tox-inventory-builder:
     - ./tests/scripts/rebase.sh
     - apt-get update && apt-get install -y python3-pip
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-    - python -m pip uninstall -y ansible ansible-base ansible-core
     - python -m pip install -r tests/requirements.txt
   script:
     - pip3 install tox
@@ -64,23 +64,9 @@ markdownlint:
   tags: [light]
   image: node
   before_script:
-    - npm install -g markdownlint-cli@0.22.0
+    - npm install -g markdownlint-cli
   script:
-    - markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md
+    - markdownlint README.md docs --ignore docs/_sidebar.md
-
-check-readme-versions:
-  stage: unit-tests
-  tags: [light]
-  image: python:3
-  script:
-    - tests/scripts/check_readme_versions.sh
-
-check-typo:
-  stage: unit-tests
-  tags: [light]
-  image: python:3
-  script:
-    - tests/scripts/check_typo.sh

 ci-matrix:
   stage: unit-tests

View file

@@ -1,86 +0,0 @@
----
-.molecule:
-  tags: [c3.small.x86]
-  only: [/^pr-.*$/]
-  except: ['triggers']
-  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
-  services: []
-  stage: deploy-part1
-  before_script:
-    - tests/scripts/rebase.sh
-    - apt-get update && apt-get install -y python3-pip
-    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-    - python -m pip uninstall -y ansible ansible-base ansible-core
-    - python -m pip install -r tests/requirements.txt
-    - ./tests/scripts/vagrant_clean.sh
-  script:
-    - ./tests/scripts/molecule_run.sh
-  after_script:
-    - chronic ./tests/scripts/molecule_logs.sh
-  artifacts:
-    when: always
-    paths:
-      - molecule_logs/
-
-# CI template for periodic CI jobs
-# Enabled when PERIODIC_CI_ENABLED var is set
-.molecule_periodic:
-  only:
-    variables:
-      - $PERIODIC_CI_ENABLED
-  allow_failure: true
-  extends: .molecule
-
-molecule_full:
-  extends: .molecule_periodic
-
-molecule_no_container_engines:
-  extends: .molecule
-  script:
-    - ./tests/scripts/molecule_run.sh -e container-engine
-  when: on_success
-
-molecule_docker:
-  extends: .molecule
-  script:
-    - ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
-  when: on_success
-
-molecule_containerd:
-  extends: .molecule
-  script:
-    - ./tests/scripts/molecule_run.sh -i container-engine/containerd
-  when: on_success
-
-molecule_cri-o:
-  extends: .molecule
-  stage: deploy-part2
-  script:
-    - ./tests/scripts/molecule_run.sh -i container-engine/cri-o
-  when: on_success
-
-# Stage 3 container engines don't get as much attention so allow them to fail
-molecule_kata:
-  extends: .molecule
-  stage: deploy-part3
-  allow_failure: true
-  script:
-    - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
-  when: on_success
-
-molecule_gvisor:
-  extends: .molecule
-  stage: deploy-part3
-  allow_failure: true
-  script:
-    - ./tests/scripts/molecule_run.sh -i container-engine/gvisor
-  when: on_success
-
-molecule_youki:
-  extends: .molecule
-  stage: deploy-part3
-  allow_failure: true
-  script:
-    - ./tests/scripts/molecule_run.sh -i container-engine/youki
-  when: on_success
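Judging from the job names above, `molecule_run.sh` seems to take `-i` to include a single scenario and `-e` to exclude one; a hypothetical invocation (flag semantics inferred from the jobs, not verified against the script):

```ShellSession
# Run only the containerd scenario, as molecule_containerd does
./tests/scripts/molecule_run.sh -i container-engine/containerd
# Run everything except the container-engine scenarios
./tests/scripts/molecule_run.sh -e container-engine
```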

View file

@@ -1,139 +1,88 @@
 ---
-.packet:
+.packet: &packet
   extends: .testcases
   variables:
-    ANSIBLE_TIMEOUT: "120"
-    CI_PLATFORM: packet
-    SSH_USER: kubespray
+    CI_PLATFORM: "packet"
+    SSH_USER: "kubespray"
   tags:
     - packet
-  except: [triggers]
-
-# CI template for PRs
-.packet_pr:
   only: [/^pr-.*$/]
-  extends: .packet
+  except: ['triggers']

-# CI template for periodic CI jobs
-# Enabled when PERIODIC_CI_ENABLED var is set
-.packet_periodic:
-  only:
-    variables:
-      - $PERIODIC_CI_ENABLED
-  allow_failure: true
+packet_ubuntu18-calico-aio:
+  stage: deploy-part1
   extends: .packet
+  when: on_success

-# The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
+# Future AIO job
 packet_ubuntu20-calico-aio:
   stage: deploy-part1
-  extends: .packet_pr
+  extends: .packet
   when: on_success
-  variables:
-    RESET_CHECK: "true"
-
-packet_ubuntu20-calico-aio-ansible-2_11:
-  stage: deploy-part1
-  extends: .packet_periodic
-  when: on_success
-  variables:
-    ANSIBLE_MAJOR_VERSION: "2.11"
-    RESET_CHECK: "true"

 # ### PR JOBS PART2

-packet_ubuntu18-aio-docker:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: on_success
-
-packet_ubuntu20-aio-docker:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: on_success
-
-packet_ubuntu20-calico-aio-hardening:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: on_success
-
-packet_ubuntu18-calico-aio:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: on_success
-
-packet_ubuntu22-aio-docker:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: on_success
-
-packet_ubuntu22-calico-aio:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: on_success
-
-packet_centos7-flannel-addons-ha:
-  extends: .packet_pr
-  stage: deploy-part2
-  when: on_success
-
-packet_almalinux8-crio:
-  extends: .packet_pr
+packet_centos7-flannel-containerd-addons-ha:
+  extends: .packet
+  stage: deploy-part2
+  when: on_success
+  variables:
+    MITOGEN_ENABLE: "true"
+
+packet_centos7-crio:
+  extends: .packet
   stage: deploy-part2
   when: on_success
+  variables:
+    MITOGEN_ENABLE: "true"

 packet_ubuntu18-crio:
-  extends: .packet_pr
+  extends: .packet
   stage: deploy-part2
   when: manual
+  variables:
+    MITOGEN_ENABLE: "true"

-packet_fedora35-crio:
-  extends: .packet_pr
-  stage: deploy-part2
-  when: manual
-
-packet_ubuntu16-canal-ha:
+packet_ubuntu16-canal-kubeadm-ha:
   stage: deploy-part2
-  extends: .packet_periodic
+  extends: .packet
   when: on_success

 packet_ubuntu16-canal-sep:
   stage: deploy-special
-  extends: .packet_pr
+  extends: .packet
   when: manual

 packet_ubuntu16-flannel-ha:
   stage: deploy-part2
-  extends: .packet_pr
+  extends: .packet
+  when: manual
+
+packet_ubuntu16-kube-router-sep:
+  stage: deploy-part2
+  extends: .packet
+  when: manual
+
+packet_ubuntu16-kube-router-svc-proxy:
+  stage: deploy-part2
+  extends: .packet
   when: manual

 packet_debian10-cilium-svc-proxy:
   stage: deploy-part2
-  extends: .packet_periodic
-  when: on_success
+  extends: .packet
+  when: manual

-packet_debian10-calico:
+packet_debian10-containerd:
   stage: deploy-part2
-  extends: .packet_pr
-  when: on_success
-
-packet_debian10-docker:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: on_success
-
-packet_debian11-calico:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: on_success
-
-packet_debian11-docker:
-  stage: deploy-part2
-  extends: .packet_pr
+  extends: .packet
   when: on_success
+  variables:
+    MITOGEN_ENABLE: "true"

 packet_centos7-calico-ha-once-localhost:
   stage: deploy-part2
-  extends: .packet_pr
+  extends: .packet
   when: on_success
   variables:
     # This will instruct Docker not to start over TLS.
@@ -141,124 +90,99 @@ packet_centos7-calico-ha-once-localhost:
   services:
     - docker:19.03.9-dind

-packet_almalinux8-kube-ovn:
+packet_centos8-kube-ovn:
   stage: deploy-part2
-  extends: .packet_periodic
+  extends: .packet
   when: on_success

-packet_almalinux8-calico:
+packet_centos8-calico:
   stage: deploy-part2
-  extends: .packet_pr
+  extends: .packet
   when: on_success

-packet_rockylinux8-calico:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: on_success
-
-packet_rockylinux9-calico:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: on_success
-
-packet_almalinux8-docker:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: on_success
-
-packet_fedora36-docker-weave:
+packet_fedora32-weave:
   stage: deploy-part2
-  extends: .packet_pr
+  extends: .packet
   when: on_success

 packet_opensuse-canal:
   stage: deploy-part2
-  extends: .packet_periodic
+  extends: .packet
   when: on_success

-packet_opensuse-docker-cilium:
+packet_ubuntu18-ovn4nfv:
   stage: deploy-part2
-  extends: .packet_pr
-  when: manual
+  extends: .packet
+  when: on_success
+
+# Contiv does not work in k8s v1.16
+# packet_ubuntu16-contiv-sep:
+#   stage: deploy-part2
+#   extends: .packet
+#   when: on_success

 # ### MANUAL JOBS

-packet_ubuntu16-docker-weave-sep:
+packet_ubuntu16-weave-sep:
   stage: deploy-part2
-  extends: .packet_pr
+  extends: .packet
   when: manual

 packet_ubuntu18-cilium-sep:
   stage: deploy-special
-  extends: .packet_pr
+  extends: .packet
   when: manual

-packet_ubuntu18-flannel-ha:
+packet_ubuntu18-flannel-containerd-ha:
   stage: deploy-part2
-  extends: .packet_pr
+  extends: .packet
   when: manual

-packet_ubuntu18-flannel-ha-once:
+packet_ubuntu18-flannel-containerd-ha-once:
   stage: deploy-part2
-  extends: .packet_pr
-  when: manual
-
-# Calico HA eBPF
-packet_almalinux8-calico-ha-ebpf:
-  stage: deploy-part2
-  extends: .packet_pr
+  extends: .packet
   when: manual

 packet_debian9-macvlan:
   stage: deploy-part2
-  extends: .packet_pr
+  extends: .packet
   when: manual

 packet_centos7-calico-ha:
   stage: deploy-part2
-  extends: .packet_pr
+  extends: .packet
+  when: manual
+
+packet_centos7-kube-router:
+  stage: deploy-part2
+  extends: .packet
   when: manual

 packet_centos7-multus-calico:
   stage: deploy-part2
-  extends: .packet_pr
+  extends: .packet
   when: manual

-packet_centos7-canal-ha:
+packet_oracle7-canal-ha:
   stage: deploy-part2
-  extends: .packet_pr
+  extends: .packet
   when: manual

-packet_fedora36-docker-calico:
+packet_fedora31-flannel:
   stage: deploy-part2
-  extends: .packet_periodic
+  extends: .packet
   when: on_success
   variables:
-    RESET_CHECK: "true"
-
-packet_fedora35-calico-selinux:
-  stage: deploy-part2
-  extends: .packet_periodic
-  when: on_success
-
-packet_fedora35-calico-swap-selinux:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: manual
+    MITOGEN_ENABLE: "true"

 packet_amazon-linux-2-aio:
   stage: deploy-part2
-  extends: .packet_pr
+  extends: .packet
   when: manual

-packet_almalinux8-calico-nodelocaldns-secondary:
+packet_fedora32-kube-ovn-containerd:
   stage: deploy-part2
-  extends: .packet_pr
-  when: manual
-
-packet_fedora36-kube-ovn:
-  stage: deploy-part2
-  extends: .packet_periodic
+  extends: .packet
   when: on_success

 # ### PR JOBS PART3

@@ -266,63 +190,40 @@ packet_fedora36-kube-ovn:
 packet_centos7-weave-upgrade-ha:
   stage: deploy-part3
-  extends: .packet_periodic
+  extends: .packet
   when: on_success
   variables:
     UPGRADE_TEST: basic
-    MITOGEN_ENABLE: "false"

-packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
+packet_debian9-calico-upgrade:
   stage: deploy-part3
-  extends: .packet_periodic
-  when: on_success
-  variables:
-    UPGRADE_TEST: basic
-
-# Calico HA Wireguard
-packet_ubuntu20-calico-ha-wireguard:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: manual
-
-packet_debian11-calico-upgrade:
-  stage: deploy-part3
-  extends: .packet_pr
+  extends: .packet
   when: on_success
   variables:
     UPGRADE_TEST: graceful
-    MITOGEN_ENABLE: "false"

-packet_almalinux8-calico-remove-node:
+packet_debian9-calico-upgrade-once:
   stage: deploy-part3
-  extends: .packet_pr
-  when: on_success
-  variables:
-    REMOVE_NODE_CHECK: "true"
-    REMOVE_NODE_NAME: "instance-3"
-
-packet_ubuntu20-calico-etcd-kubeadm:
-  stage: deploy-part3
-  extends: .packet_pr
-  when: on_success
-
-packet_debian11-calico-upgrade-once:
-  stage: deploy-part3
-  extends: .packet_periodic
+  extends: .packet
   when: on_success
   variables:
     UPGRADE_TEST: graceful
-    MITOGEN_ENABLE: "false"

 packet_ubuntu18-calico-ha-recover:
   stage: deploy-part3
-  extends: .packet_periodic
+  extends: .packet
   when: on_success
   variables:
     RECOVER_CONTROL_PLANE_TEST: "true"
-    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
+    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"

 packet_ubuntu18-calico-ha-recover-noquorum:
   stage: deploy-part3
-  extends: .packet_periodic
+  extends: .packet
   when: on_success
   variables:
     RECOVER_CONTROL_PLANE_TEST: "true"
-    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"
+    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube-master[1:]"

View file

@@ -4,13 +4,13 @@ shellcheck:
   stage: unit-tests
   tags: [light]
   variables:
-    SHELLCHECK_VERSION: v0.7.1
+    SHELLCHECK_VERSION: v0.6.0
   before_script:
     - ./tests/scripts/rebase.sh
     - curl --silent --location "https://github.com/koalaman/shellcheck/releases/download/"${SHELLCHECK_VERSION}"/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv
     - cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
     - shellcheck --version
   script:
-    # Run shellcheck for all *.sh
-    - find . -name '*.sh' -not -path './.git/*' | xargs shellcheck --severity error
+    # Run shellcheck for all *.sh except contrib/
+    - find . -name '*.sh' -not -path './contrib/*' | xargs shellcheck --severity error
   except: ['triggers', 'master']

View file

@@ -12,13 +12,13 @@
   # Prepare inventory
   - cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars .
   - ln -s contrib/terraform/$PROVIDER/hosts
-  - terraform -chdir="contrib/terraform/$PROVIDER" init
+  - terraform init contrib/terraform/$PROVIDER
   # Copy SSH keypair
   - mkdir -p ~/.ssh
   - echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
   - chmod 400 ~/.ssh/id_rsa
   - echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
-  - mkdir -p contrib/terraform/$PROVIDER/group_vars
+  - mkdir -p group_vars
   # Random subnet to avoid routing conflicts
   - export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"
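The `TF_VAR_subnet_cidr` line above picks a random /24 per CI run; a quick sketch of what the expansion produces (`$RANDOM` is a bash builtin yielding 0-32767, so `% 256` maps it to one octet):

```ShellSession
echo "10.$(( RANDOM % 256 )).$(( RANDOM % 256 )).0/24"
# e.g. 10.73.201.0/24 (the two middle octets vary per run)
```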
@@ -28,8 +28,8 @@
   tags: [light]
   only: ['master', /^pr-.*$/]
   script:
-    - terraform -chdir="contrib/terraform/$PROVIDER" validate
-    - terraform -chdir="contrib/terraform/$PROVIDER" fmt -check -diff
+    - terraform validate -var-file=cluster.tfvars contrib/terraform/$PROVIDER
+    - terraform fmt -check -diff contrib/terraform/$PROVIDER

 .terraform_apply:
   extends: .terraform_install
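The validate/fmt rewrite above tracks a Terraform CLI change: since 0.14 the positional directory argument was replaced by the global `-chdir` option, so the removed (`-`) lines are the modern form and the added (`+`) lines the older 0.12-style form. A side-by-side sketch, with `aws` standing in for `$PROVIDER`:

```ShellSession
# Terraform 0.12-style positional directory (the + side of this diff)
terraform init contrib/terraform/aws
terraform validate -var-file=cluster.tfvars contrib/terraform/aws
# Terraform 0.14+ equivalent using -chdir (the - side of this diff)
terraform -chdir="contrib/terraform/aws" init
terraform -chdir="contrib/terraform/aws" validate
```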
@@ -56,48 +56,28 @@
 tf-validate-openstack:
   extends: .terraform_validate
   variables:
-    TF_VERSION: $TERRAFORM_VERSION
+    TF_VERSION: 0.12.24
     PROVIDER: openstack
     CLUSTER: $CI_COMMIT_REF_NAME

-tf-validate-metal:
+tf-validate-packet:
   extends: .terraform_validate
   variables:
-    TF_VERSION: $TERRAFORM_VERSION
-    PROVIDER: metal
+    TF_VERSION: 0.12.24
+    PROVIDER: packet
     CLUSTER: $CI_COMMIT_REF_NAME

 tf-validate-aws:
   extends: .terraform_validate
   variables:
-    TF_VERSION: $TERRAFORM_VERSION
+    TF_VERSION: 0.12.24
     PROVIDER: aws
     CLUSTER: $CI_COMMIT_REF_NAME
-
-tf-validate-exoscale:
-  extends: .terraform_validate
-  variables:
-    TF_VERSION: $TERRAFORM_VERSION
-    PROVIDER: exoscale
-
-tf-validate-vsphere:
-  extends: .terraform_validate
-  variables:
-    TF_VERSION: $TERRAFORM_VERSION
-    PROVIDER: vsphere
-    CLUSTER: $CI_COMMIT_REF_NAME
-
-tf-validate-upcloud:
-  extends: .terraform_validate
-  variables:
-    TF_VERSION: $TERRAFORM_VERSION
-    PROVIDER: upcloud
-    CLUSTER: $CI_COMMIT_REF_NAME

 # tf-packet-ubuntu16-default:
 #   extends: .terraform_apply
 #   variables:
-#     TF_VERSION: $TERRAFORM_VERSION
+#     TF_VERSION: 0.12.24
 #     PROVIDER: packet
 #     CLUSTER: $CI_COMMIT_REF_NAME
 #     TF_VAR_number_of_k8s_masters: "1"

@@ -111,7 +91,7 @@ tf-validate-upcloud:
 # tf-packet-ubuntu18-default:
 #   extends: .terraform_apply
 #   variables:
-#     TF_VERSION: $TERRAFORM_VERSION
+#     TF_VERSION: 0.12.24
 #     PROVIDER: packet
 #     CLUSTER: $CI_COMMIT_REF_NAME
 #     TF_VAR_number_of_k8s_masters: "1"

@@ -146,6 +126,10 @@ tf-validate-upcloud:
     OS_INTERFACE: public
     OS_IDENTITY_API_VERSION: "3"
     TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"
+    # Since ELASTX is in Stockholm, Mitogen helps with latency
+    MITOGEN_ENABLE: "false"
+    # Mitogen doesn't support interpreter discovery yet
+    ANSIBLE_PYTHON_INTERPRETER: "/usr/bin/python3"
 tf-elastx_cleanup:
   stage: unit-tests

@@ -162,10 +146,9 @@ tf-elastx_ubuntu18-calico:
   extends: .terraform_apply
   stage: deploy-part3
   when: on_success
-  allow_failure: true
   variables:
     <<: *elastx_variables
-    TF_VERSION: $TERRAFORM_VERSION
+    TF_VERSION: 0.12.24
     PROVIDER: openstack
     CLUSTER: $CI_COMMIT_REF_NAME
     ANSIBLE_TIMEOUT: "60"
@@ -191,45 +174,44 @@ tf-elastx_ubuntu18-calico:
     TF_VAR_image: ubuntu-18.04-server-latest
     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

-# OVH voucher expired, commenting job until things are sorted out
-# tf-ovh_cleanup:
-#   stage: unit-tests
-#   tags: [light]
-#   image: python
-#   environment: ovh
-#   variables:
-#     <<: *ovh_variables
-#   before_script:
-#     - pip install -r scripts/openstack-cleanup/requirements.txt
-#   script:
-#     - ./scripts/openstack-cleanup/main.py
+tf-ovh_cleanup:
+  stage: unit-tests
+  tags: [light]
+  image: python
+  environment: ovh
+  variables:
+    <<: *ovh_variables
+  before_script:
+    - pip install -r scripts/openstack-cleanup/requirements.txt
+  script:
+    - ./scripts/openstack-cleanup/main.py

-# tf-ovh_ubuntu18-calico:
-#   extends: .terraform_apply
-#   when: on_success
-#   environment: ovh
-#   variables:
-#     <<: *ovh_variables
-#     TF_VERSION: $TERRAFORM_VERSION
-#     PROVIDER: openstack
-#     CLUSTER: $CI_COMMIT_REF_NAME
-#     ANSIBLE_TIMEOUT: "60"
-#     SSH_USER: ubuntu
-#     TF_VAR_number_of_k8s_masters: "0"
-#     TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
-#     TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
-#     TF_VAR_number_of_etcd: "0"
-#     TF_VAR_number_of_k8s_nodes: "0"
-#     TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
-#     TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
-#     TF_VAR_number_of_bastions: "0"
-#     TF_VAR_number_of_k8s_masters_no_etcd: "0"
-#     TF_VAR_use_neutron: "0"
-#     TF_VAR_floatingip_pool: "Ext-Net"
-#     TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
-#     TF_VAR_network_name: "Ext-Net"
-#     TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
-#     TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
-#     TF_VAR_image: "Ubuntu 18.04"
-#     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
+tf-ovh_ubuntu18-calico:
+  extends: .terraform_apply
+  when: on_success
+  environment: ovh
+  variables:
+    <<: *ovh_variables
+    TF_VERSION: 0.12.24
+    PROVIDER: openstack
+    CLUSTER: $CI_COMMIT_REF_NAME
+    ANSIBLE_TIMEOUT: "60"
+    SSH_USER: ubuntu
+    TF_VAR_number_of_k8s_masters: "0"
+    TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
+    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
+    TF_VAR_number_of_etcd: "0"
+    TF_VAR_number_of_k8s_nodes: "0"
+    TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
+    TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
+    TF_VAR_number_of_bastions: "0"
+    TF_VAR_number_of_k8s_masters_no_etcd: "0"
+    TF_VAR_use_neutron: "0"
+    TF_VAR_floatingip_pool: "Ext-Net"
+    TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
+    TF_VAR_network_name: "Ext-Net"
+    TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
+    TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
+    TF_VAR_image: "Ubuntu 18.04"
+    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

View file

@@ -1,5 +1,21 @@
 ---

+molecule_tests:
+  tags: [c3.small.x86]
+  only: [/^pr-.*$/]
+  except: ['triggers']
+  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
+  services: []
+  stage: deploy-part1
+  before_script:
+    - tests/scripts/rebase.sh
+    - apt-get update && apt-get install -y python3-pip
+    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+    - python -m pip install -r tests/requirements.txt
+    - ./tests/scripts/vagrant_clean.sh
+  script:
+    - ./tests/scripts/molecule_run.sh
+
 .vagrant:
   extends: .testcases
   variables:

@@ -15,19 +31,12 @@
   before_script:
     - apt-get update && apt-get install -y python3-pip
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
     - ./tests/scripts/vagrant_clean.sh
   script:
     - ./tests/scripts/testcases_run.sh
   after_script:
     - chronic ./tests/scripts/testcases_cleanup.sh
-  allow_failure: true
-
-vagrant_ubuntu18-calico-dual-stack:
-  stage: deploy-part2
-  extends: .vagrant
-  when: on_success

 vagrant_ubuntu18-flannel:
   stage: deploy-part2

@@ -43,25 +52,3 @@ vagrant_ubuntu20-flannel:
   stage: deploy-part2
   extends: .vagrant
   when: on_success
-  allow_failure: false
-
-vagrant_ubuntu16-kube-router-sep:
-  stage: deploy-part2
-  extends: .vagrant
-  when: manual
-
-# Service proxy test fails connectivity testing
-vagrant_ubuntu16-kube-router-svc-proxy:
-  stage: deploy-part2
-  extends: .vagrant
-  when: manual
-
-vagrant_fedora35-kube-router:
-  stage: deploy-part2
-  extends: .vagrant
-  when: on_success
-
-vagrant_centos7-kube-router:
-  stage: deploy-part2
-  extends: .vagrant
-  when: manual

View file

@@ -1,3 +1,2 @@
 ---
 MD013: false
-MD029: false

View file

@@ -1,48 +0,0 @@
----
-repos:
-  - repo: https://github.com/adrienverge/yamllint.git
-    rev: v1.27.1
-    hooks:
-      - id: yamllint
-        args: [--strict]
-
-  - repo: https://github.com/markdownlint/markdownlint
-    rev: v0.11.0
-    hooks:
-      - id: markdownlint
-        args: [ -r, "~MD013,~MD029" ]
-        exclude: "^.git"
-
-  - repo: local
-    hooks:
-      - id: ansible-lint
-        name: ansible-lint
-        entry: ansible-lint -v
-        language: python
-        pass_filenames: false
-        additional_dependencies:
-          - .[community]
-
-      - id: ansible-syntax-check
-        name: ansible-syntax-check
-        entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
-        language: python
-        files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"
-
-      - id: tox-inventory-builder
-        name: tox-inventory-builder
-        entry: bash -c "cd contrib/inventory_builder && tox"
-        language: python
-        pass_filenames: false
-
-      - id: check-readme-versions
-        name: check-readme-versions
-        entry: tests/scripts/check_readme_versions.sh
-        language: script
-        pass_filenames: false
-
-      - id: ci-matrix
-        name: ci-matrix
-        entry: tests/scripts/md-table/test.sh
-        language: script
-        pass_filenames: false

View file

@@ -1,9 +1,6 @@
 ---
 extends: default

-ignore: |
-  .git/
-
 rules:
   braces:
     min-spaces-inside: 0

View file

@@ -6,22 +6,11 @@
 It is recommended to use filter to manage the GitHub email notification, see [examples for setting filters to Kubernetes Github notifications](https://github.com/kubernetes/community/blob/master/communication/best-practices.md#examples-for-setting-filters-to-kubernetes-github-notifications)

-To install development dependencies you can set up a python virtual env with the necessary dependencies:
-
-```ShellSession
-virtualenv venv
-source venv/bin/activate
-pip install -r tests/requirements.txt
-```
+To install development dependencies you can use `pip install -r tests/requirements.txt`

 #### Linting

-Kubespray uses [pre-commit](https://pre-commit.com) hook configuration to run several linters, please install this tool and use it to run validation tests before submitting a PR.
-
-```ShellSession
-pre-commit install
-pre-commit run -a # To run pre-commit hook on all files in the repository, even if they were not modified
-```
+Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `./tests/scripts/ansible-lint.sh`

 #### Molecule

@@ -38,9 +27,5 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster
 1. Submit an issue describing your proposed change to the repo in question.
 2. The [repo owners](OWNERS) will respond to your issue promptly.
 3. Fork the desired repo, develop and test your code changes.
-4. Install [pre-commit](https://pre-commit.com) and install it in your development repo.
-5. Addess any pre-commit validation failures.
-6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
-7. Submit a pull request.
-8. Work with the reviewers on their suggestions.
-9. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before final merger of your contribution.
+4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
+5. Submit a pull request.

View file

@@ -1,37 +1,21 @@
-# Use imutable image tags rather than mutable tags (like ubuntu:20.04)
-FROM ubuntu:focal-20220531
-
-ARG ARCH=amd64
-ARG TZ=Etc/UTC
-RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
-
-RUN apt update -y \
-    && apt install -y \
+FROM ubuntu:18.04
+
+RUN mkdir /kubespray
+WORKDIR /kubespray
+
+RUN apt update -y && \
+    apt install -y \
     libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
-    ca-certificates curl gnupg2 software-properties-common python3-pip unzip rsync git \
-    && rm -rf /var/lib/apt/lists/*
-RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
-    && add-apt-repository \
-    "deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \
+    ca-certificates curl gnupg2 software-properties-common python3-pip rsync
+RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
+    add-apt-repository \
+    "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
     $(lsb_release -cs) \
     stable" \
-    && apt update -y && apt-get install --no-install-recommends -y docker-ce \
-    && rm -rf /var/lib/apt/lists/*
+    && apt update -y && apt-get install docker-ce -y
+
+COPY . .
+RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
+RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.5/bin/linux/amd64/kubectl \
+    && chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl

 # Some tools like yamllint need this
-# Pip needs this as well at the moment to install ansible
-# (and potentially other packages)
-# See: https://github.com/pypa/pip/issues/10219
 ENV LANG=C.UTF-8
-
-WORKDIR /kubespray
-COPY . .
-RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \
-    && /usr/bin/python3 -m pip install --no-cache-dir -r tests/requirements.txt \
-    && python3 -m pip install --no-cache-dir -r requirements.txt \
-    && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
-
-RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
-    && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
-    && chmod a+x kubectl \
-    && mv kubectl /usr/local/bin/kubectl
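The removed `RUN KUBE_VERSION=...` step pins kubectl to the `kube_version` default shipped in the repo, rather than a hard-coded release; the `sed -n 's/^kube_version: //p'` part just prints the value after the `kube_version: ` prefix. A standalone sketch of that extraction:

```ShellSession
# -n suppresses default output; the s///p command prints only lines that
# start with "kube_version: ", with that prefix stripped
echo 'kube_version: v1.25.5' | sed -n 's/^kube_version: //p'
# -> v1.25.5
```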

View file

@@ -1,7 +1,5 @@
 mitogen:
-	@echo Mitogen support is deprecated.
-	@echo Please run the following command manually:
-	@echo ansible-playbook -c local mitogen.yml -vv
+	ansible-playbook -c local mitogen.yml -vv
 clean:
 	rm -rf dist/
 	rm *.retry

2
OWNERS
View file

@@ -4,5 +4,3 @@ approvers:
   - kubespray-approvers
 reviewers:
   - kubespray-reviewers
-emeritus_approvers:
-  - kubespray-emeritus_approvers

View file

@@ -1,26 +1,19 @@
 aliases:
   kubespray-approvers:
+    - ant31
     - mattymo
+    - atoms
     - chadswen
     - mirwan
     - miouge1
+    - riverzhang
+    - verwilst
+    - woopstar
     - luckysb
-    - floryut
-    - oomichi
-    - cristicalin
-    - liupeng0518
-    - yankay
   kubespray-reviewers:
+    - jjungnickel
+    - archifleks
     - holmsten
     - bozzo
+    - floryut
     - eppo
+    - oomichi
-    - jayonlau
-    - cristicalin
-    - liupeng0518
-    - yankay
-  kubespray-emeritus_approvers:
-    - riverzhang
-    - atoms
-    - ant31
-    - woopstar

126
README.md
View file

@@ -5,7 +5,7 @@
 If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
 You can get your invite [here](http://slack.k8s.io/)

-- Can be deployed on **[AWS](docs/aws.md), GCE, [Azure](docs/azure.md), [OpenStack](docs/openstack.md), [vSphere](docs/vsphere.md), [Equinix Metal](docs/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
+- Can be deployed on **[AWS](docs/aws.md), GCE, [Azure](docs/azure.md), [OpenStack](docs/openstack.md), [vSphere](docs/vsphere.md), [Packet](docs/packet.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
 - **Highly available** cluster
 - **Composable** (Choice of the network plugin for instance)
 - Supports most popular **Linux distributions**
@@ -19,10 +19,10 @@ To deploy the cluster you can use :
 #### Usage

-Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
-then run the following steps:
-
 ```ShellSession
+# Install dependencies from ``requirements.txt``
+sudo pip3 install -r requirements.txt
+
 # Copy ``inventory/sample`` as ``inventory/mycluster``
 cp -rfp inventory/sample inventory/mycluster

@@ -32,7 +32,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv
 # Review and change parameters under ``inventory/mycluster/group_vars``
 cat inventory/mycluster/group_vars/all/all.yml
-cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
+cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml

 # Deploy Kubespray with Ansible Playbook - run the playbook as root
 # The option `--become` is required, as for example writing SSL keys in /etc/,
@@ -48,24 +48,11 @@ As a consequence, `ansible-playbook` command will fail with:
 ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
 ```

-probably pointing on a task depending on a module present in requirements.txt.
+probably pointing on a task depending on a module present in requirements.txt (i.e. "unseal vault").

 One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
 A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of pip packages installation location, which can be found in the Location field of the output of `pip show [package]` before executing `ansible-playbook`.
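A sketch of that workaround, assuming the pip package is simply named `ansible` and that `pip show` reports its install prefix in the `Location:` field:

```ShellSession
# Extract the install prefix, then point Ansible at the pip-installed modules
LOCATION=$(pip show ansible | sed -n 's/^Location: //p')
export ANSIBLE_LIBRARY="$LOCATION/ansible/modules"
export ANSIBLE_MODULE_UTILS="$LOCATION/ansible/module_utils"
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```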
-A simple way to ensure you get all the correct version of Ansible is to use the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
-You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
-
-```ShellSession
-git checkout v2.20.0
-docker pull quay.io/kubespray/kubespray:v2.20.0
-docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
-  --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.20.0 bash
-# Inside the container you may now run the kubespray playbooks:
-ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
-```

 ### Vagrant

 For Vagrant we need to install python dependencies for provisioning tasks.
@@ -76,11 +63,10 @@ python -V && pip -V
 ```

 If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>

-Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
-then run the following step:
+Install the necessary requirements

 ```ShellSession
+sudo pip install -r requirements.txt
 vagrant up
 ```
@@ -107,85 +93,63 @@ vagrant up
 - [AWS](docs/aws.md)
 - [Azure](docs/azure.md)
 - [vSphere](docs/vsphere.md)
-- [Equinix Metal](docs/equinix-metal.md)
+- [Packet Host](docs/packet.md)
 - [Large deployments](docs/large-deployments.md)
 - [Adding/replacing a node](docs/nodes.md)
 - [Upgrades basics](docs/upgrades.md)
 - [Air-Gap installation](docs/offline-environment.md)
-- [NTP](docs/ntp.md)
-- [Hardening](docs/hardening.md)
-- [Mirror](docs/mirror.md)
 - [Roadmap](docs/roadmap.md)

 ## Supported Linux Distributions

 - **Flatcar Container Linux by Kinvolk**
-- **Debian** Bullseye, Buster, Jessie, Stretch
-- **Ubuntu** 16.04, 18.04, 20.04, 22.04
-- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
-- **Fedora** 35, 36
-- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
-- **openSUSE** Leap 15.x/Tumbleweed
-- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
-- **Alma Linux** [8, 9](docs/centos.md#centos-8)
-- **Rocky Linux** [8, 9](docs/centos.md#centos-8)
-- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
-- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
-- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md))
-- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md))
+- **Debian** Buster, Jessie, Stretch, Wheezy
+- **Ubuntu** 16.04, 18.04, 20.04
+- **CentOS/RHEL** 7, 8 (experimental: see [centos 8 notes](docs/centos8.md))
+- **Fedora** 31, 32
+- **Fedora CoreOS** (experimental: see [fcos Note](docs/fcos.md))
+- **openSUSE** Leap 42.3/Tumbleweed
+- **Oracle Linux** 7, 8 (experimental: [centos 8 notes](docs/centos8.md) apply)

 Note: Upstart/SysV init based OS types are not supported.

 ## Supported Components

 - Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.5
-  - [etcd](https://github.com/etcd-io/etcd) v3.5.6
-  - [docker](https://www.docker.com/) v20.10 (see note)
-  - [containerd](https://containerd.io/) v1.6.14
-  - [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
+  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.18.10
+  - [etcd](https://github.com/coreos/etcd) v3.4.3
+  - [docker](https://www.docker.com/) v19.03 (see note)
+  - [containerd](https://containerd.io/) v1.2.13
+  - [cri-o](http://cri-o.io/) v1.17 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
 - Network Plugin
-  - [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
-  - [calico](https://github.com/projectcalico/calico) v3.24.5
+  - [cni-plugins](https://github.com/containernetworking/plugins) v0.8.6
+  - [calico](https://github.com/projectcalico/calico) v3.15.2
   - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-  - [cilium](https://github.com/cilium/cilium) v1.12.1
-  - [flannel](https://github.com/flannel-io/flannel) v0.19.2
-  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7
-  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
-  - [multus](https://github.com/intel/multus-cni) v3.8
-  - [weave](https://github.com/weaveworks/weave) v2.8.1
-  - [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.5
+  - [cilium](https://github.com/cilium/cilium) v1.8.3
+  - [contiv](https://github.com/contiv/install) v1.2.1
+  - [flanneld](https://github.com/coreos/flannel) v0.12.0
+  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.3.0
+  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.0.1
+  - [multus](https://github.com/intel/multus-cni) v3.6.0
+  - [ovn4nfv](https://github.com/opnfv/ovn4nfv-k8s-plugin) v1.1.0
+  - [weave](https://github.com/weaveworks/weave) v2.7.0
 - Application
-  - [cert-manager](https://github.com/jetstack/cert-manager) v1.10.1
-  - [coredns](https://github.com/coredns/coredns) v1.9.3
-  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.5.1
-  - [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
-  - [argocd](https://argoproj.github.io/) v2.4.16
-  - [helm](https://helm.sh/) v3.9.4
-  - [metallb](https://metallb.universe.tf/) v0.12.1
-  - [registry](https://github.com/distribution/distribution) v2.8.1
-- Storage Plugin
+  - [ambassador](https://github.com/datawire/ambassador): v1.5
   - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
   - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
-  - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
-  - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
-  - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
-  - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
-  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22
-  - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
+  - [cert-manager](https://github.com/jetstack/cert-manager) v0.16.1
+  - [coredns](https://github.com/coredns/coredns) v1.6.7
+  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.35.0

-## Container Runtime Notes
-
-- The list of available docker version is 18.09, 19.03 and 20.10. The recommended docker version is 20.10. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
-- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)
+Note: The list of validated [docker versions](https://kubernetes.io/docs/setup/production-environment/container-runtimes/#docker) is 1.13.1, 17.03, 17.06, 17.09, 18.06, 18.09 and 19.03. The recommended docker version is 19.03. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).

 ## Requirements

-- **Minimum required version of Kubernetes is v1.23**
-- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
+- **Minimum required version of Kubernetes is v1.17**
+- **Ansible v2.9+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
 - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
 - The target servers are configured to allow **IPv4 forwarding**.
-- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
+- **Your ssh key must be copied** to all the servers part of your inventory.
 - The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
   in order to avoid any issue during deployment you should disable your firewall.
 - If kubespray is ran from non-root user account, correct privilege escalation method
@@ -215,6 +179,11 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us
 - [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.

+- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
+  apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
+
+- [ovn4nfv](docs/ovn4nfv.md): [ovn4nfv-k8s-plugins](https://github.com/opnfv/ovn4nfv-k8s-plugin) is the network controller, OVS agent and CNI server to offer basic SFC and OVN overlay networking.
+
 - [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
   (Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).
@@ -235,9 +204,9 @@ See also [Network checker](docs/netcheck.md).
 ## Ingress Plugins

+- [ambassador](docs/ambassador.md): the Ambassador Ingress Controller and API gateway.
 - [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.
-- [metallb](docs/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
@@ -250,12 +219,11 @@ See also [Network checker](docs/netcheck.md).
 - [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
 - [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
-- [Kubean](https://github.com/kubean-io/kubean)

 ## CI Tests

 [![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/pipeline.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)

-CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).
+CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Packet](https://www.packet.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).

 See the [test matrix](docs/test_cases.md) for details.

View file

@@ -2,18 +2,17 @@
 The Kubespray Project is released on an as-needed basis. The process is as follows:

-1. An issue is proposing a new release with a changelog since the last release. Please see [a good sample issue](https://github.com/kubernetes-sigs/kubespray/issues/8325)
+1. An issue is proposing a new release with a changelog since the last release
 2. At least one of the [approvers](OWNERS_ALIASES) must approve this release
 3. The `kube_version_min_required` variable is set to `n-1`
-4. Remove hashes for [EOL versions](https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
-5. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
-6. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
-7. An approver creates a release branch in the form `release-X.Y`
-8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
-9. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
-10. The release issue is closed
-11. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
-12. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
+4. Remove hashes for [EOL versions](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
+5. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
+6. An approver creates a release branch in the form `release-X.Y`
+7. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
+8. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
+9. The release issue is closed
+10. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
+11. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`

 ## Major/minor releases and milestones
@ -47,37 +46,3 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1 then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1
and *any* changes to other components, like etcd v4, or calico 1.2.3. and *any* changes to other components, like etcd v4, or calico 1.2.3.
And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively. And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively.
## Release note creation
You can create a release note with:
```shell
export GITHUB_TOKEN=<your-github-token>
export ORG=kubernetes-sigs
export REPO=kubespray
release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --dependencies=false --output=/tmp/kubespray-release-note --required-author=""
```
If the release note file(/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label(`kind/feature`, etc.).
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note.
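As a hedged aside (the PR number and label below are illustrative, not taken from this repository's history), a missing kind label can be applied with the GitHub CLI before re-running the generator:

```shell
# Hypothetical PR number; any kind/* label moves a PR out of "Uncategorized".
gh pr edit 1234 --repo kubernetes-sigs/kubespray --add-label kind/feature
release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --dependencies=false --output=/tmp/kubespray-release-note --required-author=""
```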
## Container image creation
The container image `quay.io/kubespray/kubespray:vX.Y.Z` can be created from Dockerfile of the kubespray root directory:
```shell
cd kubespray/
nerdctl build -t quay.io/kubespray/kubespray:vX.Y.Z .
nerdctl push quay.io/kubespray/kubespray:vX.Y.Z
```
The container image `quay.io/kubespray/vagrant:vX.Y.Z` can be created from build.sh of test-infra/vagrant-docker/:
```shell
cd kubespray/test-infra/vagrant-docker/
./build vX.Y.Z
```
Please note that the above operation requires the permission to push container images into quay.io/kubespray/.
If you don't have the permission, please ask it on the #kubespray-dev channel.
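As an optional sanity check (the tag is the usual `vX.Y.Z` placeholder), the pushed image can be pulled back and exercised — a sketch, not part of the documented process:

```shell
# Placeholder tag; confirms the push to quay.io succeeded and the image runs.
nerdctl pull quay.io/kubespray/kubespray:vX.Y.Z
nerdctl run --rm quay.io/kubespray/kubespray:vX.Y.Z ansible --version
```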
@ -9,7 +9,5 @@
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/ # INSTRUCTIONS AT https://kubernetes.io/security/
atoms
mattymo
floryut
oomichi
cristicalin
Vagrantfile
@ -26,17 +26,12 @@ SUPPORTED_OS = {
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
"centos8" => {box: "centos/8", user: "vagrant"},
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
-"almalinux8" => {box: "almalinux/8", user: "vagrant"},
+"fedora31" => {box: "fedora/31-cloud-base", user: "vagrant"},
-"almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
+"fedora32" => {box: "fedora/32-cloud-base", user: "vagrant"},
-"rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
+"opensuse" => {box: "bento/opensuse-leap-15.1", user: "vagrant"},
"fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
"fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
"opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
"rhel7" => {box: "generic/rhel7", user: "vagrant"},
"rhel8" => {box: "generic/rhel8", user: "vagrant"},
}

if File.exist?(CONFIG)
@ -52,13 +47,12 @@ $vm_cpus ||= 2
$shared_folders ||= {}
$forwarded_ports ||= {}
$subnet ||= "172.18.8"
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
$os ||= "ubuntu1804"
$network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
-$multi_networking ||= "False"
+$multi_networking ||= false
$download_run_once ||= "True"
-$download_force_cache ||= "False"
+$download_force_cache ||= "True"
# The first three nodes are etcd servers
$etcd_instances ||= $num_instances
# The first two nodes are kube masters
@ -71,12 +65,9 @@ $kube_node_instances_with_disks_size ||= "20G"
$kube_node_instances_with_disks_number ||= 2
$override_disk_size ||= false
$disk_size ||= "20GB"
-$local_path_provisioner_enabled ||= "False"
+$local_path_provisioner_enabled ||= false
$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/"
$libvirt_nested ||= false
# boolean or string (e.g. "-vvv")
$ansible_verbosity ||= false
$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || ""
$playbook ||= "cluster.yml"
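A minimal sketch of overriding these defaults, assuming the Vagrantfile reads an optional `vagrant/config.rb` before applying them (the values below are illustrative):

```shell
# Write a local override file, then bring the cluster up.
mkdir -p vagrant
cat > vagrant/config.rb <<'EOF'
$os = "centos8"
$num_instances = 3
$network_plugin = "calico"
EOF
vagrant up --provider=libvirt
```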
@ -92,9 +83,9 @@ $inventory = File.absolute_path($inventory, File.dirname(__FILE__))
if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
$vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible")
FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
-$vagrant_inventory = File.join($vagrant_ansible,"inventory")
+if ! File.exist?(File.join($vagrant_ansible,"inventory"))
-FileUtils.rm_f($vagrant_inventory)
+FileUtils.ln_s($inventory, File.join($vagrant_ansible,"inventory"))
-FileUtils.ln_s($inventory, $vagrant_inventory)
+end
end

if Vagrant.has_plugin?("vagrant-proxyconf")
@ -151,7 +142,6 @@ Vagrant.configure("2") do |config|
vb.gui = $vm_gui
vb.linked_clone = true
vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM
vb.customize ["modifyvm", :id, "--audio", "none"]
end
node.vm.provider :libvirt do |lv|
@ -173,7 +163,7 @@ Vagrant.configure("2") do |config|
# always make /dev/sd{a/b/c} so that CI can ensure that
# virtualbox and libvirt will have the same devices to use for OSDs
(1..$kube_node_instances_with_disks_number).each do |d|
-lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi"
+lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "ide"
end
end
end
@ -186,39 +176,19 @@ Vagrant.configure("2") do |config|
node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
end
if ["rhel7","rhel8"].include? $os
# Vagrant synced_folder rsync options cannot be used for RHEL boxes as Rsync package cannot
# be installed until the host is registered with a valid Red Hat support subscription
node.vm.synced_folder ".", "/vagrant", disabled: false
$shared_folders.each do |src, dst|
node.vm.synced_folder src, dst
end
else
node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
$shared_folders.each do |src, dst|
node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
end
end
ip = "#{$subnet}.#{i+100}"
-node.vm.network :private_network, ip: ip,
+node.vm.network :private_network, ip: ip
:libvirt__guest_ipv6 => 'yes',
:libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
:libvirt__ipv6_prefix => "64",
:libvirt__forward_mode => "none",
:libvirt__dhcp_enabled => false
# Disable swap for each vm
node.vm.provision "shell", inline: "swapoff -a"
-# ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
+# Disable firewalld on oraclelinux vms
-if ["ubuntu1804", "ubuntu2004"].include? $os
+if ["oraclelinux","oraclelinux8"].include? $os
node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
end
# Disable firewalld on oraclelinux/redhat vms
if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
end
@ -244,11 +214,9 @@ Vagrant.configure("2") do |config|
}
# Only execute the Ansible provisioner once, when all the machines are up and ready.
# And limit the action to gathering facts, the full playbook is going to be ran by testcases_run.sh
if i == $num_instances
node.vm.provision "ansible" do |ansible|
ansible.playbook = $playbook
ansible.verbose = $ansible_verbosity
$ansible_inventory_path = File.join( $inventory, "hosts.ini")
if File.exist?($ansible_inventory_path)
ansible.inventory_path = $ansible_inventory_path
@ -258,14 +226,12 @@ Vagrant.configure("2") do |config|
ansible.host_key_checking = false
ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
ansible.host_vars = host_vars
-if $ansible_tags != ""
+#ansible.tags = ['download']
ansible.tags = [$ansible_tags]
end
ansible.groups = {
"etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
-"kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
+"kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
-"kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
+"kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
-"k8s_cluster:children" => ["kube_control_plane", "kube_node"],
+"k8s-cluster:children" => ["kube-master", "kube-node"],
}
end
end
@ -1,8 +1,9 @@
[ssh_connection] [ssh_connection]
pipelining=True pipelining=True
ansible_ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
#control_path = ~/.ssh/ansible-%%r@%%h:%%p #control_path = ~/.ssh/ansible-%%r@%%h:%%p
[defaults] [defaults]
strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .) # https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
force_valid_group_names = ignore force_valid_group_names = ignore
@ -10,11 +11,11 @@ host_key_checking=False
gathering = smart gathering = smart
fact_caching = jsonfile fact_caching = jsonfile
fact_caching_connection = /tmp fact_caching_connection = /tmp
fact_caching_timeout = 86400 fact_caching_timeout = 7200
stdout_callback = default stdout_callback = default
display_skipped_hosts = no display_skipped_hosts = no
library = ./library library = ./library
callbacks_enabled = profile_tasks,ara_default callback_whitelist = profile_tasks
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
deprecation_warnings=False deprecation_warnings=False
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg
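To see which of these settings are actually in effect on a given machine, a quick check (run from the directory holding this `ansible.cfg`) is:

```shell
# Prints only the settings that differ from Ansible's built-in defaults.
ansible-config dump --only-changed
```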
@ -3,31 +3,13 @@
gather_facts: false
become: no
vars:
-minimal_ansible_version: 2.11.0
+minimal_ansible_version: 2.8.0
maximal_ansible_version: 2.14.0
ansible_connection: local
tags: always
tasks:
-- name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
+- name: "Check ansible version >={{ minimal_ansible_version }}"
assert:
-msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }} exclusive"
+msg: "Ansible must be {{ minimal_ansible_version }} or higher"
that:
- ansible_version.string is version(minimal_ansible_version, ">=")
- ansible_version.string is version(maximal_ansible_version, "<")
tags:
- check

- name: "Check that python netaddr is installed"
assert:
msg: "Python netaddr is not present"
that: "'127.0.0.1' | ipaddr"
tags:
- check

# CentOS 7 provides too old jinja version
- name: "Check that jinja is not too old (install via pip)"
assert:
msg: "Your Jinja version is too old, install via pip"
that: "{% set test %}It works{% endset %}{{ test == 'It works' }}"
tags:
- check
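A sketch of running these preflight assertions on their own, with a hypothetical inventory path:

```shell
# The same checks run automatically at the start of cluster.yml.
ansible-playbook -i inventory/mycluster/hosts.yaml ansible_version.yml
python3 -c "import netaddr" || pip3 install netaddr
```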
@ -2,21 +2,31 @@
- name: Check ansible version
import_playbook: ansible_version.yml
-- name: Ensure compatibility with old groups
+- hosts: all
-import_playbook: legacy_groups.yml
+gather_facts: false
tags: always
tasks:
- name: "Set up proxy environment"
set_fact:
proxy_env:
http_proxy: "{{ http_proxy | default ('') }}"
HTTP_PROXY: "{{ http_proxy | default ('') }}"
https_proxy: "{{ https_proxy | default ('') }}"
HTTPS_PROXY: "{{ https_proxy | default ('') }}"
no_proxy: "{{ no_proxy | default ('') }}"
NO_PROXY: "{{ no_proxy | default ('') }}"
no_log: true
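Since `proxy_env` above is assembled from plain `http_proxy`/`https_proxy`/`no_proxy` variables, they can be supplied at run time; a sketch with a hypothetical proxy address:

```shell
ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml \
  -e http_proxy=http://10.0.0.1:3128 \
  -e https_proxy=http://10.0.0.1:3128 \
  -e no_proxy=localhost,127.0.0.1
```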
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

-- hosts: k8s_cluster:etcd
+- hosts: k8s-cluster:etcd
strategy: linear
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
gather_facts: false
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bootstrap-os, tags: bootstrap-os}
@ -25,20 +35,19 @@
tags: always
import_playbook: facts.yml

-- hosts: k8s_cluster:etcd
+- hosts: k8s-cluster:etcd
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, tags: preinstall }
-- { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
+- { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
- { role: download, tags: download, when: "not skip_downloads" }
environment: "{{ proxy_env }}"

-- hosts: etcd:kube_control_plane
+- hosts: etcd
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd
@ -46,12 +55,11 @@
vars:
etcd_cluster_setup: true
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
-when: etcd_deployment_type != "kubeadm"
+when: not etcd_kubeadm_enabled| default(false)

-- hosts: k8s_cluster
+- hosts: k8s-cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd
@ -59,59 +67,52 @@
vars:
etcd_cluster_setup: false
etcd_events_cluster_setup: false
-when:
+when: not etcd_kubeadm_enabled| default(false)
- etcd_deployment_type != "kubeadm"
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"

-- hosts: k8s_cluster
+- hosts: k8s-cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/node, tags: node }
environment: "{{ proxy_env }}"
-- hosts: kube_control_plane
+- hosts: kube-master
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
-- { role: kubernetes/control-plane, tags: master }
+- { role: kubernetes/master, tags: master }
- { role: kubernetes/client, tags: client }
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }

-- hosts: k8s_cluster
+- hosts: k8s-cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/kubeadm, tags: kubeadm}
- { role: kubernetes/node-label, tags: node-label }
- { role: network_plugin, tags: network }
- { role: kubernetes/node-label, tags: node-label }
-- hosts: calico_rr
+- hosts: calico-rr
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }

-- hosts: kube_control_plane[0]
+- hosts: kube-master[0]
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }

-- hosts: kube_control_plane
+- hosts: kube-master
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
@ -119,13 +120,18 @@
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
- { role: kubernetes-apps, tags: apps }

-- name: Apply resolv.conf changes now that cluster DNS is up
+- hosts: kube-master
-hosts: k8s_cluster
+gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes-apps, tags: apps }
environment: "{{ proxy_env }}"
- hosts: k8s-cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
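For reference, a typical invocation of this playbook (the inventory path is hypothetical):

```shell
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```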
@ -35,7 +35,7 @@ class SearchEC2Tags(object):
hosts['_meta'] = { 'hostvars': {} }

##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
-for group in ["kube_control_plane", "kube_node", "etcd"]:
+for group in ["kube-master", "kube-node", "etcd"]:
hosts[group] = []
tag_key = "kubespray-role"
tag_value = ["*"+group+"*"]
@ -70,7 +70,7 @@
hosts[group].append(dns_name)
hosts['_meta']['hostvars'][dns_name] = ansible_host

-hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']}
+hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
print(json.dumps(hosts, sort_keys=True, indent=2))

SearchEC2Tags()
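A sketch of exercising this dynamic inventory directly; credentials and region are assumed to come from the usual AWS environment or `~/.aws` configuration:

```shell
# Prints the generated inventory JSON, grouped by the kubespray-role tags.
python3 contrib/aws_inventory/kubespray-aws-inventory.py
```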
@ -1 +0,0 @@
boto3 # Apache-2.0
@ -31,7 +31,7 @@ also removes all public IPs from all other VMs.
To generate and apply the templates, call:

```shell
-./apply-rg.sh <resource_group_name>
+$ ./apply-rg.sh <resource_group_name>
```

If you change something in the configuration (e.g. number of nodes) later, you can call this again and Azure will
@ -42,26 +42,25 @@ take care about creating/modifying whatever is needed.
If you need to delete all resources from a resource group, simply call:

```shell
-./clear-rg.sh <resource_group_name>
+$ ./clear-rg.sh <resource_group_name>
```

**WARNING** this really deletes everything from your resource group, including everything that was later created by you!

## Installing Ansible and the dependencies

Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)

## Generating an inventory for kubespray

After you have applied the templates, you can generate an inventory with this call:

```shell
-./generate-inventory.sh <resource_group_name>
+$ ./generate-inventory.sh <resource_group_name>
```

It will create the file ./inventory which can then be used with kubespray, e.g.:

```shell
-cd kubespray-root-dir
+$ cd kubespray-root-dir
-ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
+$ sudo pip3 install -r requirements.txt
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
```
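A minimal sketch of the dependency installation step referenced above, inside a virtualenv (paths illustrative):

```shell
python3 -m venv ~/.venvs/kubespray
source ~/.venvs/kubespray/bin/activate
pip install -U -r requirements.txt   # run from the kubespray root
```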
@ -12,4 +12,3 @@
template:
src: inventory.j2
dest: "{{ playbook_dir }}/inventory"
mode: 0644
@ -7,9 +7,9 @@
{% endif %}
{% endfor %}

-[kube_control_plane]
+[kube-master]
{% for vm in vm_list %}
-{% if 'kube_control_plane' in vm.tags.roles %}
+{% if 'kube-master' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
@ -21,13 +21,13 @@
{% endif %}
{% endfor %}

-[kube_node]
+[kube-node]
{% for vm in vm_list %}
-{% if 'kube_node' in vm.tags.roles %}
+{% if 'kube-node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

-[k8s_cluster:children]
+[k8s-cluster:children]
-kube_node
+kube-node
-kube_control_plane
+kube-master
@ -22,10 +22,8 @@
template:
src: inventory.j2
dest: "{{ playbook_dir }}/inventory"
mode: 0644

- name: Generate Load Balancer variables
template:
src: loadbalancer_vars.j2
dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
mode: 0644
@ -7,9 +7,9 @@
{% endif %}
{% endfor %}

-[kube_control_plane]
+[kube-master]
{% for vm in vm_roles_list %}
-{% if 'kube_control_plane' in vm.tags.roles %}
+{% if 'kube-master' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
@ -21,14 +21,14 @@
{% endif %}
{% endfor %}

-[kube_node]
+[kube-node]
{% for vm in vm_roles_list %}
-{% if 'kube_node' in vm.tags.roles %}
+{% if 'kube-node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

-[k8s_cluster:children]
+[k8s-cluster:children]
-kube_node
+kube-node
-kube_control_plane
+kube-master
@ -8,13 +8,11 @@
path: "{{ base_dir }}"
state: directory
recurse: true
mode: 0755

- name: Store json files in base_dir
template:
src: "{{ item }}"
dest: "{{ base_dir }}/{{ item }}"
mode: 0644
with_items:
- network.json
- storage.json
@ -144,7 +144,7 @@
"[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]" "[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
], ],
"tags": { "tags": {
"roles": "kube_control_plane,etcd" "roles": "kube-master,etcd"
}, },
"apiVersion": "{{apiVersion}}", "apiVersion": "{{apiVersion}}",
"properties": { "properties": {
@ -61,7 +61,7 @@
"[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]" "[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
], ],
"tags": { "tags": {
"roles": "kube_node" "roles": "kube-node"
}, },
"apiVersion": "{{apiVersion}}", "apiVersion": "{{apiVersion}}",
"properties": { "properties": {
@ -6,7 +6,6 @@ to serve as Kubernetes "nodes", which in turn will run
called DIND (Docker-IN-Docker).

The playbook has two roles:

- dind-host: creates the "nodes" as containers in localhost, with
appropriate settings for DIND (privileged, volume mapping for dind
storage, etc).
@ -28,7 +27,7 @@ See below for a complete successful run:
1. Create the node containers

-```shell
+~~~~
# From the kubespray root dir
cd contrib/dind
pip install -r requirements.txt
@ -37,15 +36,15 @@ ansible-playbook -i hosts dind-cluster.yaml
# Back to kubespray root
cd ../..
-```
+~~~~

NOTE: if the playbook run fails with something like below error
message, you may need to specifically set `ansible_python_interpreter`,
see `./hosts` file for an example expanded localhost entry.

-```shell
+~~~
failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"}
-```
+~~~

2. Customize kubespray-dind.yaml
@ -53,33 +52,33 @@ Note that there's coupling between above created node containers
and `kubespray-dind.yaml` settings, in particular regarding selected `node_distro`
(as set in `group_vars/all/all.yaml`), and docker settings.

-```shell
+~~~
$EDITOR contrib/dind/kubespray-dind.yaml
-```
+~~~
3. Prepare the inventory and run the playbook

-```shell
+~~~
INVENTORY_DIR=inventory/local-dind
mkdir -p ${INVENTORY_DIR}
rm -f ${INVENTORY_DIR}/hosts.ini
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml
-```
+~~~
NOTE: You could also test other distros without editing files by
passing `--extra-vars` as per below commandline,
replacing `DISTRO` by either `debian`, `ubuntu`, `centos`, `fedora`:

-```shell
+~~~
cd contrib/dind
ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO
cd ../..
CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO
-```
+~~~

## Resulting deployment
@ -90,7 +89,7 @@ from the host where you ran kubespray playbooks.
Running from an Ubuntu Xenial host:

-```shell
+~~~
$ uname -a
Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24
15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
@ -150,14 +149,14 @@ kube-system weave-net-xr46t 2/2 Running 0
$ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check
{"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null}
-```
+~~~

## Using ./run-test-distros.sh

You can use `./run-test-distros.sh` to run a set of tests via DIND;
an excerpt from this script, to get an idea:

-```shell
+~~~
# The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos)
# EXTRAS=(
@ -170,7 +169,7 @@ and excerpt from this script, to get an idea:
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to main kubespray ansible-playbook run.
-```
+~~~

See e.g. `test-some_distros-most_CNIs.env` and
`test-some_distros-kube_router_combo.env` in particular for a richer
@ -35,7 +35,6 @@
path-exclude=/usr/share/doc/*
path-include=/usr/share/doc/*/copyright
dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
mode: 0644
when:
- ansible_os_family == 'Debian'
@ -64,7 +63,6 @@
copy:
content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
dest: "/etc/sudoers.d/{{ distro_user }}"
mode: 0640

- name: Add my pubkey to "{{ distro_user }}" user authorized keys
authorized_key:
@ -17,7 +17,7 @@ pass_or_fail() {
test_distro() {
local distro=${1:?};shift
local extra="${*:-}"
-local prefix="${distro[${extra}]}"
+local prefix="$distro[${extra}]}"
ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
pass_or_fail "$prefix: dind-nodes" || return 1
(cd ../..
@ -46,7 +46,7 @@ test_distro() {
pass_or_fail "$prefix: netcheck" || return 1
}

-NODES=($(egrep ^kube_node hosts))
+NODES=($(egrep ^kube-node hosts))
NETCHECKER_HOST=localhost
: ${OUTPUT_DIR:=./out}
@ -71,15 +71,15 @@ for spec in ${SPECS}; do
echo "Loading file=${spec} ..." echo "Loading file=${spec} ..."
. ${spec} || continue . ${spec} || continue
: ${DISTROS:?} || continue : ${DISTROS:?} || continue
echo "DISTROS:" "${DISTROS[@]}" echo "DISTROS=${DISTROS[@]}"
echo "EXTRAS->" echo "EXTRAS->"
printf " %s\n" "${EXTRAS[@]}" printf " %s\n" "${EXTRAS[@]}"
let n=1 let n=1
for distro in "${DISTROS[@]}"; do for distro in ${DISTROS[@]}; do
for extra in "${EXTRAS[@]:-NULL}"; do for extra in "${EXTRAS[@]:-NULL}"; do
# Magic value to let this for run once: # Magic value to let this for run once:
[[ ${extra} == NULL ]] && unset extra [[ ${extra} == NULL ]] && unset extra
docker rm -f "${NODES[@]}" docker rm -f ${NODES[@]}
printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++)) printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
{ {
info "${distro}[${extra}] START: file_out=${file_out}" info "${distro}[${extra}] START: file_out=${file_out}"
@ -44,11 +44,11 @@ import re
import subprocess
import sys

-ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster',
+ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
-'calico_rr']
+'calico-rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
-'load', 'add']
+'load']
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
yaml = YAML()
@ -63,12 +63,10 @@ def get_var_as_bool(name, default):
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")

-# Remove the reference of KUBE_MASTERS after some deprecation cycles.
+KUBE_MASTERS = int(os.environ.get("KUBE_MASTERS_MASTERS", 2))
KUBE_CONTROL_HOSTS = int(os.environ.get("KUBE_CONTROL_HOSTS",
os.environ.get("KUBE_MASTERS", 2)))
# Reconfigures cluster distribution at scale
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
-MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200))
+MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))

DEBUG = get_var_as_bool("DEBUG", True)
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
@ -82,54 +80,32 @@ class KubesprayInventory(object):
def __init__(self, changed_hosts=None, config_file=None):
self.config_file = config_file
self.yaml_config = {}
-loadPreviousConfig = False
+if self.config_file:
printHostnames = False
# See whether there are any commands to process
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
if changed_hosts[0] == "add":
loadPreviousConfig = True
changed_hosts = changed_hosts[1:]
elif changed_hosts[0] == "print_hostnames":
loadPreviousConfig = True
printHostnames = True
else:
self.parse_command(changed_hosts[0], changed_hosts[1:])
sys.exit(0)
# If the user wants to remove a node, we need to load the config anyway
if changed_hosts and changed_hosts[0][0] == "-":
loadPreviousConfig = True
-if self.config_file and loadPreviousConfig:
+# Load previous YAML file
try:
self.hosts_file = open(config_file, 'r')
-self.yaml_config = yaml.load(self.hosts_file)
+self.yaml_config = yaml.load_all(self.hosts_file)
-except OSError as e:
+except OSError:
-# I am assuming we are catching "cannot open file" exceptions
+pass
print(e)
sys.exit(1)
-if printHostnames:
+if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
-self.print_hostnames()
+self.parse_command(changed_hosts[0], changed_hosts[1:])
sys.exit(0)

self.ensure_required_groups(ROLES)

if changed_hosts:
changed_hosts = self.range2ips(changed_hosts)
-self.hosts = self.build_hostnames(changed_hosts,
+self.hosts = self.build_hostnames(changed_hosts)
loadPreviousConfig)
self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
self.set_all(self.hosts)
self.set_k8s_cluster()
etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
if len(self.hosts) >= SCALE_THRESHOLD:
-self.set_kube_control_plane(list(self.hosts.keys())[
+self.set_kube_master(list(self.hosts.keys())[
-etcd_hosts_count:(etcd_hosts_count + KUBE_CONTROL_HOSTS)])
+etcd_hosts_count:(etcd_hosts_count + KUBE_MASTERS)])
else:
-self.set_kube_control_plane(
+self.set_kube_master(list(self.hosts.keys())[:KUBE_MASTERS])
list(self.hosts.keys())[:KUBE_CONTROL_HOSTS])
self.set_kube_node(self.hosts.keys())
if len(self.hosts) >= SCALE_THRESHOLD:
self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])
@ -179,29 +155,17 @@ class KubesprayInventory(object):
except IndexError:
raise ValueError("Host name must end in an integer")

-# Keeps already specified hosts,
+def build_hostnames(self, changed_hosts):
# and adds or removes the hosts provided as an argument
def build_hostnames(self, changed_hosts, loadPreviousConfig=False):
existing_hosts = OrderedDict()
highest_host_id = 0
# Load already existing hosts from the YAML
-if loadPreviousConfig:
+try:
try:
for host in self.yaml_config['all']['hosts']:
-# Read configuration of an existing host
+existing_hosts[host] = self.yaml_config['all']['hosts'][host]
hostConfig = self.yaml_config['all']['hosts'][host]
existing_hosts[host] = hostConfig
# If the existing host seems
# to have been created automatically, detect its ID
if host.startswith(HOST_PREFIX):
host_id = self.get_host_id(host)
if host_id > highest_host_id:
highest_host_id = host_id
-except Exception as e:
+except Exception:
-# I am assuming we are catching automatically
+pass
# created hosts without IDs
print(e)
sys.exit(1)

# FIXME(mattymo): Fix condition where delete then add reuses highest id
next_host_id = highest_host_id + 1
@ -209,7 +173,6 @@ class KubesprayInventory(object):
all_hosts = existing_hosts.copy()
for host in changed_hosts:
# Delete the host from config the hostname/IP has a "-" prefix
if host[0] == "-":
realhost = host[1:]
if self.exists_hostname(all_hosts, realhost):
@ -218,8 +181,6 @@ class KubesprayInventory(object):
elif self.exists_ip(all_hosts, realhost):
self.debug("Marked {0} for deletion.".format(realhost))
self.delete_host_by_ip(all_hosts, realhost)
# Host/Argument starts with a digit,
# then we assume its an IP address
elif host[0].isdigit():
if ',' in host:
ip, access_ip = host.split(',')
@ -239,15 +200,11 @@ class KubesprayInventory(object):
next_host = subprocess.check_output(cmd, shell=True)
next_host = next_host.strip().decode('ascii')
else:
# Generates a hostname because we have only an IP address
next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
next_host_id += 1
# Uses automatically generated node name
# in case we dont provide it.
all_hosts[next_host] = {'ansible_host': access_ip,
'ip': ip,
'access_ip': access_ip}
# Host/Argument starts with a letter, then we assume its a hostname
elif host[0].isalpha():
if ',' in host:
try:
@ -266,7 +223,6 @@ class KubesprayInventory(object):
'access_ip': access_ip}
return all_hosts

# Expand IP ranges into individual addresses
def range2ips(self, hosts):
reworked_hosts = []
@ -310,7 +266,7 @@ class KubesprayInventory(object):
def purge_invalid_hosts(self, hostnames, protected_names=[]):
for role in self.yaml_config['all']['children']:
-if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
+if role != 'k8s-cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy()  # noqa
for host in all_hosts.keys():
if host not in hostnames and host not in protected_names:
@ -331,54 +287,52 @@ class KubesprayInventory(object):
if self.yaml_config['all']['hosts'] is None:
self.yaml_config['all']['hosts'] = {host: None}
self.yaml_config['all']['hosts'][host] = opts
-elif group != 'k8s_cluster:children':
+elif group != 'k8s-cluster:children':
if self.yaml_config['all']['children'][group]['hosts'] is None:
self.yaml_config['all']['children'][group]['hosts'] = {
host: None}
else:
self.yaml_config['all']['children'][group]['hosts'][host] = None  # noqa

-def set_kube_control_plane(self, hosts):
+def set_kube_master(self, hosts):
for host in hosts:
-self.add_host_to_group('kube_control_plane', host)
+self.add_host_to_group('kube-master', host)

def set_all(self, hosts):
for host, opts in hosts.items():
self.add_host_to_group('all', host, opts)

def set_k8s_cluster(self):
-k8s_cluster = {'children': {'kube_control_plane': None,
+k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
-'kube_node': None}}
+self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster
self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster
def set_calico_rr(self, hosts):
for host in hosts:
-if host in self.yaml_config['all']['children']['kube_control_plane']:  # noqa
+if host in self.yaml_config['all']['children']['kube-master']:
-self.debug("Not adding {0} to calico_rr group because it "
+self.debug("Not adding {0} to calico-rr group because it "
-"conflicts with kube_control_plane "
+"conflicts with kube-master group".format(host))
"group".format(host))
continue
-if host in self.yaml_config['all']['children']['kube_node']:
+if host in self.yaml_config['all']['children']['kube-node']:
-self.debug("Not adding {0} to calico_rr group because it "
+self.debug("Not adding {0} to calico-rr group because it "
-"conflicts with kube_node group".format(host))
+"conflicts with kube-node group".format(host))
continue
-self.add_host_to_group('calico_rr', host)
+self.add_host_to_group('calico-rr', host)

def set_kube_node(self, hosts):
for host in hosts:
if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
if host in self.yaml_config['all']['children']['etcd']['hosts']:  # noqa
-self.debug("Not adding {0} to kube_node group because of "
+self.debug("Not adding {0} to kube-node group because of "
"scale deployment and host is in etcd "
"group.".format(host))
continue
if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD:  # noqa
-if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']:  # noqa
+if host in self.yaml_config['all']['children']['kube-master']['hosts']:  # noqa
-self.debug("Not adding {0} to kube_node group because of "
+self.debug("Not adding {0} to kube-node group because of "
-"scale deployment and host is in "
+"scale deployment and host is in kube-master "
-"kube_control_plane group.".format(host))
+"group.".format(host))
continue
-self.add_host_to_group('kube_node', host)
+self.add_host_to_group('kube-node', host)

def set_etcd(self, hosts):
for host in hosts:
@ -435,11 +389,9 @@ help - Display this message
print_cfg - Write inventory file to stdout
print_ips - Write a space-delimited list of IPs from "all" group
print_hostnames - Write a space-delimited list of Hostnames from "all" group
add - Adds specified hosts into an already existing inventory

Advanced usage:
-Create new or overwrite old inventory file: inventory.py 10.10.1.5
+Add another host after initial creation: inventory.py 10.10.1.5
Add another host after initial creation: inventory.py add 10.10.1.6
Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3
Add hosts with a specific hostname, ip, and optional access ip: first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
@ -450,9 +402,8 @@ Configurable env vars:
DEBUG Enable debug printing. Default: True
CONFIG_FILE File to write config to Default: ./inventory/sample/hosts.yaml
HOST_PREFIX Host prefix for generated hosts. Default: node
KUBE_CONTROL_HOSTS Set the number of kube-control-planes. Default: 2
SCALE_THRESHOLD Separate ETCD role if # of nodes >= 50
-MASSIVE_SCALE_THRESHOLD Separate K8s control-plane and ETCD if # of nodes >= 200
+MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
''' # noqa
print(help_text)
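A sketch of that workflow end to end (paths and IPs hypothetical): create an inventory, then grow it with `add` so existing hosts are kept:

```shell
CONFIG_FILE=inventory/mycluster/hosts.yaml \
  python3 contrib/inventory_builder/inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
CONFIG_FILE=inventory/mycluster/hosts.yaml \
  python3 contrib/inventory_builder/inventory.py add 10.10.1.6
```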
@ -473,7 +424,6 @@ def main(argv=None):
if not argv:
argv = sys.argv[1:]
KubesprayInventory(argv, CONFIG_FILE)
return 0
if __name__ == "__main__": if __name__ == "__main__":

# under the License. # under the License.
import inventory import inventory
from io import StringIO import mock
import unittest import unittest
from unittest import mock
from collections import OrderedDict
import sys
@ -27,28 +26,6 @@ if path not in sys.path:
import inventory # noqa
class TestInventoryPrintHostnames(unittest.TestCase):
@mock.patch('ruamel.yaml.YAML.load')
def test_print_hostnames(self, load_mock):
mock_io = mock.mock_open(read_data='')
load_mock.return_value = OrderedDict({'all': {'hosts': {
'node1': {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'},
'node2': {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'}}}})
with mock.patch('builtins.open', mock_io):
with self.assertRaises(SystemExit) as cm:
with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
inventory.KubesprayInventory(
changed_hosts=["print_hostnames"],
config_file="file")
self.assertEqual("node1 node2\n", stdout.getvalue())
self.assertEqual(cm.exception.code, 0)
class TestInventory(unittest.TestCase):
@mock.patch('inventory.sys')
def setUp(self, sys_mock):
@ -74,7 +51,7 @@ class TestInventory(unittest.TestCase):
groups = ['group1', 'group2']
self.inv.ensure_required_groups(groups)
for group in groups:
-self.assertIn(group, self.inv.yaml_config['all']['children'])
+self.assertTrue(group in self.inv.yaml_config['all']['children'])

def test_get_host_id(self):
hostnames = ['node99', 'no99de01', '01node01', 'node1.domain',
@ -90,14 +67,23 @@ class TestInventory(unittest.TestCase):
self.assertRaisesRegex(ValueError, "Host name must end in an",
self.inv.get_host_id, hostname)
def test_build_hostnames_add_one(self):
changed_hosts = ['10.90.0.2']
expected = OrderedDict([('node1',
{'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'})])
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_add_duplicate(self):
changed_hosts = ['10.90.0.2']
-expected = OrderedDict([('node3',
+expected = OrderedDict([('node1',
{'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'})])
self.inv.yaml_config['all']['hosts'] = expected
-result = self.inv.build_hostnames(changed_hosts, True)
+result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)

def test_build_hostnames_add_two(self):
@ -113,30 +99,6 @@
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_add_three(self):
changed_hosts = ['10.90.0.2', '10.90.0.3', '10.90.0.4']
expected = OrderedDict([
('node1', {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'}),
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'}),
('node3', {'ansible_host': '10.90.0.4',
'ip': '10.90.0.4',
'access_ip': '10.90.0.4'})])
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_add_one(self):
changed_hosts = ['10.90.0.2']
expected = OrderedDict([('node1',
{'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'})])
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_delete_first(self): def test_build_hostnames_delete_first(self):
changed_hosts = ['-10.90.0.2'] changed_hosts = ['-10.90.0.2']
existing_hosts = OrderedDict([ existing_hosts = OrderedDict([
@ -151,24 +113,7 @@ class TestInventory(unittest.TestCase):
('node2', {'ansible_host': '10.90.0.3', ('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3', 'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})]) 'access_ip': '10.90.0.3'})])
result = self.inv.build_hostnames(changed_hosts, True) result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_delete_by_hostname(self):
changed_hosts = ['-node1']
existing_hosts = OrderedDict([
('node1', {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'}),
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
self.inv.yaml_config['all']['hosts'] = existing_hosts
expected = OrderedDict([
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result) self.assertEqual(expected, result)
def test_exists_hostname_positive(self): def test_exists_hostname_positive(self):
@ -264,8 +209,8 @@ class TestInventory(unittest.TestCase):
('doesnotbelong2', {'whateveropts=ilike'})]) ('doesnotbelong2', {'whateveropts=ilike'})])
self.inv.yaml_config['all']['hosts'] = existing_hosts self.inv.yaml_config['all']['hosts'] = existing_hosts
self.inv.purge_invalid_hosts(proper_hostnames) self.inv.purge_invalid_hosts(proper_hostnames)
self.assertNotIn( self.assertTrue(
bad_host, self.inv.yaml_config['all']['hosts'].keys()) bad_host not in self.inv.yaml_config['all']['hosts'].keys())
def test_add_host_to_group(self): def test_add_host_to_group(self):
group = 'etcd' group = 'etcd'
@ -277,13 +222,13 @@ class TestInventory(unittest.TestCase):
self.inv.yaml_config['all']['children'][group]['hosts'].get(host), self.inv.yaml_config['all']['children'][group]['hosts'].get(host),
None) None)
def test_set_kube_control_plane(self): def test_set_kube_master(self):
group = 'kube_control_plane' group = 'kube-master'
host = 'node1' host = 'node1'
self.inv.set_kube_control_plane([host]) self.inv.set_kube_master([host])
self.assertIn( self.assertTrue(
host, self.inv.yaml_config['all']['children'][group]['hosts']) host in self.inv.yaml_config['all']['children'][group]['hosts'])
def test_set_all(self): def test_set_all(self):
hosts = OrderedDict([ hosts = OrderedDict([
@ -296,30 +241,30 @@ class TestInventory(unittest.TestCase):
self.inv.yaml_config['all']['hosts'].get(host), opt) self.inv.yaml_config['all']['hosts'].get(host), opt)
def test_set_k8s_cluster(self): def test_set_k8s_cluster(self):
group = 'k8s_cluster' group = 'k8s-cluster'
expected_hosts = ['kube_node', 'kube_control_plane'] expected_hosts = ['kube-node', 'kube-master']
self.inv.set_k8s_cluster() self.inv.set_k8s_cluster()
for host in expected_hosts: for host in expected_hosts:
self.assertIn( self.assertTrue(
host, host in
self.inv.yaml_config['all']['children'][group]['children']) self.inv.yaml_config['all']['children'][group]['children'])
def test_set_kube_node(self): def test_set_kube_node(self):
group = 'kube_node' group = 'kube-node'
host = 'node1' host = 'node1'
self.inv.set_kube_node([host]) self.inv.set_kube_node([host])
self.assertIn( self.assertTrue(
host, self.inv.yaml_config['all']['children'][group]['hosts']) host in self.inv.yaml_config['all']['children'][group]['hosts'])
def test_set_etcd(self): def test_set_etcd(self):
group = 'etcd' group = 'etcd'
host = 'node1' host = 'node1'
self.inv.set_etcd([host]) self.inv.set_etcd([host])
self.assertIn( self.assertTrue(
host, self.inv.yaml_config['all']['children'][group]['hosts']) host in self.inv.yaml_config['all']['children'][group]['hosts'])
def test_scale_scenario_one(self): def test_scale_scenario_one(self):
num_nodes = 50 num_nodes = 50
@ -330,12 +275,12 @@ class TestInventory(unittest.TestCase):
self.inv.set_all(hosts) self.inv.set_all(hosts)
self.inv.set_etcd(list(hosts.keys())[0:3]) self.inv.set_etcd(list(hosts.keys())[0:3])
self.inv.set_kube_control_plane(list(hosts.keys())[0:2]) self.inv.set_kube_master(list(hosts.keys())[0:2])
self.inv.set_kube_node(hosts.keys()) self.inv.set_kube_node(hosts.keys())
for h in range(3): for h in range(3):
self.assertFalse( self.assertFalse(
list(hosts.keys())[h] in list(hosts.keys())[h] in
self.inv.yaml_config['all']['children']['kube_node']['hosts']) self.inv.yaml_config['all']['children']['kube-node']['hosts'])
def test_scale_scenario_two(self): def test_scale_scenario_two(self):
num_nodes = 500 num_nodes = 500
@ -346,12 +291,12 @@ class TestInventory(unittest.TestCase):
self.inv.set_all(hosts) self.inv.set_all(hosts)
self.inv.set_etcd(list(hosts.keys())[0:3]) self.inv.set_etcd(list(hosts.keys())[0:3])
self.inv.set_kube_control_plane(list(hosts.keys())[3:5]) self.inv.set_kube_master(list(hosts.keys())[3:5])
self.inv.set_kube_node(hosts.keys()) self.inv.set_kube_node(hosts.keys())
for h in range(5): for h in range(5):
self.assertFalse( self.assertFalse(
list(hosts.keys())[h] in list(hosts.keys())[h] in
self.inv.yaml_config['all']['children']['kube_node']['hosts']) self.inv.yaml_config['all']['children']['kube-node']['hosts'])
def test_range2ips_range(self): def test_range2ips_range(self):
changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8'] changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
@ -368,7 +313,7 @@ class TestInventory(unittest.TestCase):
self.assertRaisesRegex(Exception, "Range of ip_addresses isn't valid", self.assertRaisesRegex(Exception, "Range of ip_addresses isn't valid",
self.inv.range2ips, host_range) self.inv.range2ips, host_range)
def test_build_hostnames_create_with_one_different_ips(self): def test_build_hostnames_different_ips_add_one(self):
changed_hosts = ['10.90.0.2,192.168.0.2'] changed_hosts = ['10.90.0.2,192.168.0.2']
expected = OrderedDict([('node1', expected = OrderedDict([('node1',
{'ansible_host': '192.168.0.2', {'ansible_host': '192.168.0.2',
@ -377,7 +322,17 @@ class TestInventory(unittest.TestCase):
result = self.inv.build_hostnames(changed_hosts) result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result) self.assertEqual(expected, result)
def test_build_hostnames_create_with_two_different_ips(self): def test_build_hostnames_different_ips_add_duplicate(self):
changed_hosts = ['10.90.0.2,192.168.0.2']
expected = OrderedDict([('node1',
{'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'})])
self.inv.yaml_config['all']['hosts'] = expected
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_different_ips_add_two(self):
changed_hosts = ['10.90.0.2,192.168.0.2', '10.90.0.3,192.168.0.3'] changed_hosts = ['10.90.0.2,192.168.0.2', '10.90.0.3,192.168.0.3']
expected = OrderedDict([ expected = OrderedDict([
('node1', {'ansible_host': '192.168.0.2', ('node1', {'ansible_host': '192.168.0.2',
@ -386,210 +341,6 @@ class TestInventory(unittest.TestCase):
('node2', {'ansible_host': '192.168.0.3', ('node2', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3', 'ip': '10.90.0.3',
'access_ip': '192.168.0.3'})]) 'access_ip': '192.168.0.3'})])
self.inv.yaml_config['all']['hosts'] = OrderedDict()
result = self.inv.build_hostnames(changed_hosts) result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result) self.assertEqual(expected, result)
def test_build_hostnames_create_with_three_different_ips(self):
changed_hosts = ['10.90.0.2,192.168.0.2',
'10.90.0.3,192.168.0.3',
'10.90.0.4,192.168.0.4']
expected = OrderedDict([
('node1', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node2', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node3', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_overwrite_one_with_different_ips(self):
changed_hosts = ['10.90.0.2,192.168.0.2']
expected = OrderedDict([('node1',
{'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'})])
existing = OrderedDict([('node5',
{'ansible_host': '192.168.0.5',
'ip': '10.90.0.5',
'access_ip': '192.168.0.5'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_overwrite_three_with_different_ips(self):
changed_hosts = ['10.90.0.2,192.168.0.2']
expected = OrderedDict([('node1',
{'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'})])
existing = OrderedDict([
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'}),
('node5', {'ansible_host': '192.168.0.5',
'ip': '10.90.0.5',
'access_ip': '192.168.0.5'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_different_ips_add_duplicate(self):
changed_hosts = ['10.90.0.2,192.168.0.2']
expected = OrderedDict([('node3',
{'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'})])
existing = expected
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)
def test_build_hostnames_add_two_different_ips_into_one_existing(self):
changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
expected = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
existing = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)
def test_build_hostnames_add_two_different_ips_into_two_existing(self):
changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
expected = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'}),
('node5', {'ansible_host': '192.168.0.5',
'ip': '10.90.0.5',
'access_ip': '192.168.0.5'})])
existing = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)
def test_build_hostnames_add_two_different_ips_into_three_existing(self):
changed_hosts = ['10.90.0.5,192.168.0.5', '10.90.0.6,192.168.0.6']
expected = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'}),
('node5', {'ansible_host': '192.168.0.5',
'ip': '10.90.0.5',
'access_ip': '192.168.0.5'}),
('node6', {'ansible_host': '192.168.0.6',
'ip': '10.90.0.6',
'access_ip': '192.168.0.6'})])
existing = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)
# Add two IP addresses into a config that has
# three already defined IP addresses. One of the IP addresses
# is a duplicate.
def test_build_hostnames_add_two_duplicate_one_overlap(self):
changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
expected = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'}),
('node5', {'ansible_host': '192.168.0.5',
'ip': '10.90.0.5',
'access_ip': '192.168.0.5'})])
existing = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)
# Add two duplicate IP addresses into a config that has
# three already defined IP addresses
def test_build_hostnames_add_two_duplicate_two_overlap(self):
changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
expected = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
existing = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)

View file

@ -5,7 +5,7 @@ deployment on VMs.
This playbook does not create Virtual Machines, nor does it run Kubespray itself. This playbook does not create Virtual Machines, nor does it run Kubespray itself.
## User creation ### User creation
If you want to create a user for running Kubespray deployment, you should specify If you want to create a user for running Kubespray deployment, you should specify
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`. both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.

View file

@ -1,7 +1,7 @@
--- ---
- name: Install required packages - name: Install required packages
package: yum:
name: "{{ item }}" name: "{{ item }}"
state: present state: present
with_items: with_items:

View file

@ -28,7 +28,7 @@
sysctl: sysctl:
name: net.ipv4.ip_forward name: net.ipv4.ip_forward
value: 1 value: 1
sysctl_file: "{{ sysctl_file_path }}" sysctl_file: /etc/sysctl.d/ipv4-ip_forward.conf
state: present state: present
reload: yes reload: yes
@ -37,7 +37,7 @@
name: "{{ item }}" name: "{{ item }}"
state: present state: present
value: 0 value: 0
sysctl_file: "{{ sysctl_file_path }}" sysctl_file: /etc/sysctl.d/bridge-nf-call.conf
reload: yes reload: yes
with_items: with_items:
- net.bridge.bridge-nf-call-arptables - net.bridge.bridge-nf-call-arptables

View file

@ -11,7 +11,6 @@
state: directory state: directory
owner: "{{ k8s_deployment_user }}" owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}" group: "{{ k8s_deployment_user }}"
mode: 0700
- name: Configure sudo for deployment user - name: Configure sudo for deployment user
copy: copy:

View file

@ -8,19 +8,19 @@ In the same directory of this ReadMe file you should find a file named `inventor
Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings in `inventory/sample/group_vars/all.yml` make sense for your deployment. Then change to the kubespray root folder and execute (assuming the machines all run Ubuntu): Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings in `inventory/sample/group_vars/all.yml` make sense for your deployment. Then change to the kubespray root folder and execute (assuming the machines all run Ubuntu):
```shell ```
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
``` ```
This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute: This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:
```shell ```
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
``` ```
If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM: If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:
```shell ```
k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core
k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core
k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core
@ -30,7 +30,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us
The first step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the desired specification for your cluster. An example with all required variables would look like: The first step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the desired specification for your cluster. An example with all required variables would look like:
```ini ```
cluster_name = "cluster1" cluster_name = "cluster1"
number_of_k8s_masters = "1" number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2" number_of_k8s_masters_no_floating_ip = "2"
@ -54,7 +54,7 @@ ssh_user_gfs = "ubuntu"
As explained in the general terraform/openstack guide, you need to source your OpenStack credentials file, add your ssh-key to the ssh-agent and set up environment variables for terraform: As explained in the general terraform/openstack guide, you need to source your OpenStack credentials file, add your ssh-key to the ssh-agent and set up environment variables for terraform:
```shell ```
$ source ~/.stackrc $ source ~/.stackrc
$ eval $(ssh-agent -s) $ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/my-desired-key $ ssh-add ~/.ssh/my-desired-key
@ -67,7 +67,7 @@ $ echo Setting up Terraform creds && \
Then, from the kubespray directory (the root of the Git checkout), issue the following terraform command to create the VMs for the cluster: Then, from the kubespray directory (the root of the Git checkout), issue the following terraform command to create the VMs for the cluster:
```shell ```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
``` ```
@ -75,18 +75,18 @@ This will create both your Kubernetes and Gluster VMs. Make sure that the ansibl
Then, provision your Kubernetes (kubespray) cluster with the following ansible call: Then, provision your Kubernetes (kubespray) cluster with the following ansible call:
```shell ```
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
``` ```
Finally, provision the glusterfs nodes and add the Persistent Volume setup for GlusterFS in Kubernetes through the following ansible call: Finally, provision the glusterfs nodes and add the Persistent Volume setup for GlusterFS in Kubernetes through the following ansible call:
```shell ```
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
``` ```
If you need to destroy the cluster, you can run: If you need to destroy the cluster, you can run:
```shell ```
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
``` ```

View file

@ -15,10 +15,10 @@
roles: roles:
- { role: glusterfs/server } - { role: glusterfs/server }
- hosts: k8s_cluster - hosts: k8s-cluster
roles: roles:
- { role: glusterfs/client } - { role: glusterfs/client }
- hosts: kube_control_plane[0] - hosts: kube-master[0]
roles: roles:
- { role: kubernetes-pv } - { role: kubernetes-pv }

View file

@ -14,7 +14,7 @@
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8 # gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9 # gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
# [kube_control_plane] # [kube-master]
# node1 # node1
# node2 # node2
@ -23,16 +23,16 @@
# node2 # node2
# node3 # node3
# [kube_node] # [kube-node]
# node2 # node2
# node3 # node3
# node4 # node4
# node5 # node5
# node6 # node6
# [k8s_cluster:children] # [k8s-cluster:children]
# kube_node # kube-node
# kube_control_plane # kube-master
# [gfs-cluster] # [gfs-cluster]
# gfs_node1 # gfs_node1

View file

@ -8,22 +8,18 @@ Installs and configures GlusterFS on Linux.
For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (that port, plus an additional incremented port for each additional server in the cluster; the latter if GlusterFS is version 3.4+), and TCP/UDP port `111` must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role). For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (that port, plus an additional incremented port for each additional server in the cluster; the latter if GlusterFS is version 3.4+), and TCP/UDP port `111` must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role).
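As a rough illustration only (firewall management is outside this role), the ports above could be opened with firewalld; the brick port range below assumes a five-server cluster:

```shell
firewall-cmd --permanent --add-port=24007-24008/tcp
firewall-cmd --permanent --add-port=49152-49156/tcp   # one brick port per server
firewall-cmd --permanent --add-port=111/tcp --add-port=111/udp
firewall-cmd --reload
```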
This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/ansible/latest/collections/gluster/gluster/gluster_volume_module.html) module to ease the management of Gluster volumes. This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/gluster_volume_module.html) module to ease the management of Gluster volumes.
## Role Variables ## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`): Available variables are listed below, along with default values (see `defaults/main.yml`):
```yaml glusterfs_default_release: ""
glusterfs_default_release: ""
```
You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy). You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
```yaml glusterfs_ppa_use: yes
glusterfs_ppa_use: yes glusterfs_ppa_version: "3.5"
glusterfs_ppa_version: "3.5"
```
For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info. For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.
@ -33,11 +29,9 @@ None.
## Example Playbook ## Example Playbook
```yaml
- hosts: server - hosts: server
roles: roles:
- geerlingguy.glusterfs - geerlingguy.glusterfs
```
For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/). For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).

View file

@ -1,10 +1,10 @@
--- ---
- name: Install Prerequisites - name: Install Prerequisites
package: name={{ item }} state=present yum: name={{ item }} state=present
with_items: with_items:
- "centos-release-gluster{{ glusterfs_default_release }}" - "centos-release-gluster{{ glusterfs_default_release }}"
- name: Install Packages - name: Install Packages
package: name={{ item }} state=present yum: name={{ item }} state=present
with_items: with_items:
- glusterfs-client - glusterfs-client

View file

@ -9,7 +9,7 @@
when: ansible_os_family == "Debian" when: ansible_os_family == "Debian"
- name: install xfs RedHat - name: install xfs RedHat
package: name=xfsprogs state=present yum: name=xfsprogs state=present
when: ansible_os_family == "RedHat" when: ansible_os_family == "RedHat"
# Format external volumes in xfs # Format external volumes in xfs
@ -82,7 +82,6 @@
template: template:
dest: "{{ gluster_mount_dir }}/.test-file.txt" dest: "{{ gluster_mount_dir }}/.test-file.txt"
src: test-file.txt src: test-file.txt
mode: 0644
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0] when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
- name: Unmount glusterfs - name: Unmount glusterfs

View file

@ -1,11 +1,11 @@
--- ---
- name: Install Prerequisites - name: Install Prerequisites
package: name={{ item }} state=present yum: name={{ item }} state=present
with_items: with_items:
- "centos-release-gluster{{ glusterfs_default_release }}" - "centos-release-gluster{{ glusterfs_default_release }}"
- name: Install Packages - name: Install Packages
package: name={{ item }} state=present yum: name={{ item }} state=present
with_items: with_items:
- glusterfs-server - glusterfs-server
- glusterfs-client - glusterfs-client

View file

@ -1,4 +1,5 @@
--- ---
- hosts: all - hosts: all
roles: roles:
- { role: prepare } - role_under_test

View file

@ -3,13 +3,12 @@
template: template:
src: "{{ item.file }}" src: "{{ item.file }}"
dest: "{{ kube_config_dir }}/{{ item.dest }}" dest: "{{ kube_config_dir }}/{{ item.dest }}"
mode: 0644
with_items: with_items:
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json} - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml} - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
- { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json} - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
register: gluster_pv register: gluster_pv
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
- name: Kubernetes Apps | Set GlusterFS endpoint and PV - name: Kubernetes Apps | Set GlusterFS endpoint and PV
kube: kube:
@ -20,4 +19,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.dest }}" filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
state: "{{ item.changed | ternary('latest','present') }}" state: "{{ item.changed | ternary('latest','present') }}"
with_items: "{{ gluster_pv.results }}" with_items: "{{ gluster_pv.results }}"
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined

View file

@ -1,26 +1,17 @@
# Deploy Heketi/Glusterfs into Kubespray/Kubernetes # Deploy Heketi/Glusterfs into Kubespray/Kubernetes
This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys heketi/glusterfs into kubernetes and sets up a storageclass. This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys heketi/glusterfs into kubernetes and sets up a storageclass.
## Important notice
> Due to resource limits on the current project maintainers and general lack of contributions we are considering placing Heketi into a [near-maintenance mode](https://github.com/heketi/heketi#important-notice)
## Client Setup ## Client Setup
Heketi provides a CLI that gives users a means to administer the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine. Heketi provides a CLI that gives users a means to administer the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine.
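A minimal sketch of pointing the CLI at the deployed service, assuming the admin key from the sample inventory and a placeholder service address:

```shell
export HEKETI_CLI_SERVER=http://10.233.0.10:8080   # placeholder service address
heketi-cli --user admin --secret '11elfeinhundertundelf' cluster list
heketi-cli --user admin --secret '11elfeinhundertundelf' topology info
```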
## Install ## Install
Copy the inventory.yml.sample over to inventory/sample/k8s_heketi_inventory.yml and change it according to your setup. Copy the inventory.yml.sample over to inventory/sample/k8s_heketi_inventory.yml and change it according to your setup.
```
```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
``` ```
## Tear down ## Tear down
```
```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
``` ```

View file

@ -1,5 +1,5 @@
--- ---
- hosts: kube_control_plane[0] - hosts: kube-master[0]
roles: roles:
- { role: tear-down } - { role: tear-down }

View file

@ -3,7 +3,7 @@
roles: roles:
- { role: prepare } - { role: prepare }
- hosts: kube_control_plane[0] - hosts: kube-master[0]
tags: tags:
- "provision" - "provision"
roles: roles:

View file

@ -2,25 +2,18 @@ all:
vars: vars:
heketi_admin_key: "11elfeinhundertundelf" heketi_admin_key: "11elfeinhundertundelf"
heketi_user_key: "!!einseinseins" heketi_user_key: "!!einseinseins"
glusterfs_daemonset:
readiness_probe:
timeout_seconds: 3
initial_delay_seconds: 3
liveness_probe:
timeout_seconds: 3
initial_delay_seconds: 10
children: children:
k8s_cluster: k8s-cluster:
vars: vars:
kubelet_fail_swap_on: false kubelet_fail_swap_on: false
children: children:
kube_control_plane: kube-master:
hosts: hosts:
node1: node1:
etcd: etcd:
hosts: hosts:
node2: node2:
kube_node: kube-node:
hosts: &kube_nodes hosts: &kube_nodes
node1: node1:
node2: node2:

View file

@ -11,7 +11,7 @@
- name: "Install glusterfs mount utils (RedHat)" - name: "Install glusterfs mount utils (RedHat)"
become: true become: true
package: yum:
name: "glusterfs-fuse" name: "glusterfs-fuse"
state: "present" state: "present"
when: "ansible_os_family == 'RedHat'" when: "ansible_os_family == 'RedHat'"

View file

@ -1,10 +1,7 @@
--- ---
- name: "Kubernetes Apps | Lay Down Heketi Bootstrap" - name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
become: true become: true
template: template: { src: "heketi-bootstrap.json.j2", dest: "{{ kube_config_dir }}/heketi-bootstrap.json" }
src: "heketi-bootstrap.json.j2"
dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
mode: 0640
register: "rendering" register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Bootstrap" - name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
kube: kube:

View file

@ -10,7 +10,6 @@
template: template:
src: "topology.json.j2" src: "topology.json.j2"
dest: "{{ kube_config_dir }}/topology.json" dest: "{{ kube_config_dir }}/topology.json"
mode: 0644
- name: "Copy topology configuration into container." - name: "Copy topology configuration into container."
changed_when: false changed_when: false
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json" command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"

View file

@ -1,9 +1,6 @@
--- ---
- name: "Kubernetes Apps | Lay Down GlusterFS Daemonset" - name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
template: template: { src: "glusterfs-daemonset.json.j2", dest: "{{ kube_config_dir }}/glusterfs-daemonset.json" }
src: "glusterfs-daemonset.json.j2"
dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
mode: 0644
become: true become: true
register: "rendering" register: "rendering"
- name: "Kubernetes Apps | Install and configure GlusterFS daemonset" - name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
@ -30,10 +27,7 @@
delay: 5 delay: 5
- name: "Kubernetes Apps | Lay Down Heketi Service Account" - name: "Kubernetes Apps | Lay Down Heketi Service Account"
template: template: { src: "heketi-service-account.json.j2", dest: "{{ kube_config_dir }}/heketi-service-account.json" }
src: "heketi-service-account.json.j2"
dest: "{{ kube_config_dir }}/heketi-service-account.json"
mode: 0644
become: true become: true
register: "rendering" register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Service Account" - name: "Kubernetes Apps | Install and configure Heketi Service Account"

View file

@ -4,7 +4,6 @@
template: template:
src: "heketi-deployment.json.j2" src: "heketi-deployment.json.j2"
dest: "{{ kube_config_dir }}/heketi-deployment.json" dest: "{{ kube_config_dir }}/heketi-deployment.json"
mode: 0644
register: "rendering" register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi" - name: "Kubernetes Apps | Install and configure Heketi"

View file

@ -5,7 +5,7 @@
changed_when: false changed_when: false
- name: "Kubernetes Apps | Deploy cluster role binding." - name: "Kubernetes Apps | Deploy cluster role binding."
when: "clusterrolebinding_state.stdout | length == 0" when: "clusterrolebinding_state.stdout == \"\""
command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account" command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
- name: Get clusterrolebindings again - name: Get clusterrolebindings again
@ -15,7 +15,7 @@
- name: Make sure that clusterrolebindings are present now - name: Make sure that clusterrolebindings are present now
assert: assert:
that: "clusterrolebinding_state.stdout | length > 0" that: "clusterrolebinding_state.stdout != \"\""
msg: "Cluster role binding is not present." msg: "Cluster role binding is not present."
- name: Get the heketi-config-secret secret - name: Get the heketi-config-secret secret
@ -28,10 +28,9 @@
template: template:
src: "heketi.json.j2" src: "heketi.json.j2"
dest: "{{ kube_config_dir }}/heketi.json" dest: "{{ kube_config_dir }}/heketi.json"
mode: 0644
- name: "Deploy Heketi config secret" - name: "Deploy Heketi config secret"
when: "secret_state.stdout | length == 0" when: "secret_state.stdout == \"\""
command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json" command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
- name: Get the heketi-config-secret secret again - name: Get the heketi-config-secret secret again
@ -41,5 +40,5 @@
- name: Make sure the heketi-config-secret secret exists now - name: Make sure the heketi-config-secret secret exists now
assert: assert:
that: "secret_state.stdout | length > 0" that: "secret_state.stdout != \"\""
msg: "Heketi config secret is not present." msg: "Heketi config secret is not present."

View file

@ -2,10 +2,7 @@
- name: "Kubernetes Apps | Lay Down Heketi Storage" - name: "Kubernetes Apps | Lay Down Heketi Storage"
become: true become: true
vars: { nodes: "{{ groups['heketi-node'] }}" } vars: { nodes: "{{ groups['heketi-node'] }}" }
template: template: { src: "heketi-storage.json.j2", dest: "{{ kube_config_dir }}/heketi-storage.json" }
src: "heketi-storage.json.j2"
dest: "{{ kube_config_dir }}/heketi-storage.json"
mode: 0644
register: "rendering" register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Storage" - name: "Kubernetes Apps | Install and configure Heketi Storage"
kube: kube:

View file

@ -16,7 +16,6 @@
template: template:
src: "storageclass.yml.j2" src: "storageclass.yml.j2"
dest: "{{ kube_config_dir }}/storageclass.yml" dest: "{{ kube_config_dir }}/storageclass.yml"
mode: 0644
register: "rendering" register: "rendering"
- name: "Kubernetes Apps | Install and configure Storace Class" - name: "Kubernetes Apps | Install and configure Storace Class"
kube: kube:

View file

@ -10,7 +10,6 @@
template: template:
src: "topology.json.j2" src: "topology.json.j2"
dest: "{{ kube_config_dir }}/topology.json" dest: "{{ kube_config_dir }}/topology.json"
mode: 0644
- name: "Copy topology configuration into container." # noqa 503 - name: "Copy topology configuration into container." # noqa 503
when: "rendering.changed" when: "rendering.changed"
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json" command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"

View file

@ -73,8 +73,8 @@
"privileged": true "privileged": true
}, },
"readinessProbe": { "readinessProbe": {
"timeoutSeconds": {{ glusterfs_daemonset.readiness_probe.timeout_seconds }}, "timeoutSeconds": 3,
"initialDelaySeconds": {{ glusterfs_daemonset.readiness_probe.initial_delay_seconds }}, "initialDelaySeconds": 3,
"exec": { "exec": {
"command": [ "command": [
"/bin/bash", "/bin/bash",
@ -84,8 +84,8 @@
} }
}, },
"livenessProbe": { "livenessProbe": {
"timeoutSeconds": {{ glusterfs_daemonset.liveness_probe.timeout_seconds }}, "timeoutSeconds": 3,
"initialDelaySeconds": {{ glusterfs_daemonset.liveness_probe.initial_delay_seconds }}, "initialDelaySeconds": 10,
"exec": { "exec": {
"command": [ "command": [
"/bin/bash", "/bin/bash",

View file

@ -1,7 +1,7 @@
--- ---
- name: "Install lvm utils (RedHat)" - name: "Install lvm utils (RedHat)"
become: true become: true
package: yum:
name: "lvm2" name: "lvm2"
state: "present" state: "present"
when: "ansible_os_family == 'RedHat'" when: "ansible_os_family == 'RedHat'"
@ -19,7 +19,7 @@
become: true become: true
shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2" shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
register: "volume_groups" register: "volume_groups"
ignore_errors: true # noqa ignore-errors ignore_errors: true
changed_when: false changed_when: false
- name: "Remove volume groups." # noqa 301 - name: "Remove volume groups." # noqa 301
@ -35,11 +35,11 @@
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
become: true become: true
command: "pvremove {{ disk_volume_device_1 }} --yes" command: "pvremove {{ disk_volume_device_1 }} --yes"
ignore_errors: true # noqa ignore-errors ignore_errors: true
- name: "Remove lvm utils (RedHat)" - name: "Remove lvm utils (RedHat)"
become: true become: true
package: yum:
name: "lvm2" name: "lvm2"
state: "absent" state: "absent"
when: "ansible_os_family == 'RedHat' and heketi_remove_lvm" when: "ansible_os_family == 'RedHat' and heketi_remove_lvm"

View file

@ -1,51 +1,51 @@
--- ---
- name: Remove storage class. # noqa 301 - name: "Remove storage class." # noqa 301
command: "{{ bin_dir }}/kubectl delete storageclass gluster" command: "{{ bin_dir }}/kubectl delete storageclass gluster"
ignore_errors: true # noqa ignore-errors ignore_errors: true
- name: Tear down heketi. # noqa 301 - name: "Tear down heketi." # noqa 301
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\"" command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
ignore_errors: true # noqa ignore-errors ignore_errors: true
- name: Tear down heketi. # noqa 301 - name: "Tear down heketi." # noqa 301
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\"" command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
ignore_errors: true # noqa ignore-errors ignore_errors: true
- name: Tear down bootstrap. - name: "Tear down bootstrap."
include_tasks: "../../provision/tasks/bootstrap/tear-down.yml" include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
- name: Ensure there is nothing left over. # noqa 301 - name: "Ensure there is nothing left over." # noqa 301
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json" command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
register: "heketi_result" register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
retries: 60 retries: 60
delay: 5 delay: 5
- name: Ensure there is nothing left over. # noqa 301 - name: "Ensure there is nothing left over." # noqa 301
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json" command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
register: "heketi_result" register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
retries: 60 retries: 60
delay: 5 delay: 5
- name: Tear down glusterfs. # noqa 301 - name: "Tear down glusterfs." # noqa 301
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs" command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
ignore_errors: true # noqa ignore-errors ignore_errors: true
- name: Remove heketi storage service. # noqa 301 - name: "Remove heketi storage service." # noqa 301
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints" command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
ignore_errors: true # noqa ignore-errors ignore_errors: true
- name: Remove heketi gluster role binding # noqa 301 - name: "Remove heketi gluster role binding" # noqa 301
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin" command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
ignore_errors: true # noqa ignore-errors ignore_errors: true
- name: Remove heketi config secret # noqa 301 - name: "Remove heketi config secret" # noqa 301
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret" command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
ignore_errors: true # noqa ignore-errors ignore_errors: true
- name: Remove heketi db backup # noqa 301 - name: "Remove heketi db backup" # noqa 301
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup" command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
ignore_errors: true # noqa ignore-errors ignore_errors: true
- name: Remove heketi service account # noqa 301 - name: "Remove heketi service account" # noqa 301
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account" command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
ignore_errors: true # noqa ignore-errors ignore_errors: true
- name: Get secrets - name: "Get secrets"
command: "{{ bin_dir }}/kubectl get secrets --output=\"json\"" command: "{{ bin_dir }}/kubectl get secrets --output=\"json\""
register: "secrets" register: "secrets"
changed_when: false changed_when: false
- name: Remove heketi storage secret - name: "Remove heketi storage secret"
vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" } vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}" command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
when: "storage_query is defined" when: "storage_query is defined"
ignore_errors: true # noqa ignore-errors ignore_errors: true

View file

@ -1,65 +0,0 @@
# Offline deployment
## manage-offline-container-images.sh
Container image collecting script for offline deployment
This script has two features:
(1) Get container images from an environment which is deployed online.
(2) Deploy local container registry and register the container images to the registry.
Step (1) should be done at an online site as preparation; we then bring the collected images
to the target offline environment. If the images come from a private registry,
you need to set the `PRIVATE_REGISTRY` environment variable.
Then we run step (2) to register the images to the local registry.
Step(1) can be operated with:
```shell
manage-offline-container-images.sh create
```
Step(2) can be operated with:
```shell
manage-offline-container-images.sh register
```
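If the online environment pulls from a private registry, set `PRIVATE_REGISTRY` first; the registry host below is hypothetical:

```shell
PRIVATE_REGISTRY=registry.example.internal:5000 ./manage-offline-container-images.sh create
```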
## generate_list.sh
This script generates the list of downloaded files and the list of container images from the `roles/download/defaults/main.yml` file.
Running this script will execute the `generate_list.yml` playbook in the kubespray root directory and generate four files:
all downloaded file URLs in files.list, all container images in images.list, and Jinja2 templates in *.template.
```shell
./generate_list.sh
tree temp
temp
├── files.list
├── files.list.template
├── images.list
└── images.list.template
0 directories, 4 files
```
In some cases you may want to update a component version. You can declare version variables in the Ansible inventory file or group_vars,
then run `./generate_list.sh -i [inventory_file]` to update files.list and images.list.
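For instance, a sketch of pinning a component version before regenerating the lists (the variable value and inventory paths are illustrative):

```shell
echo 'kube_version: v1.19.2' >> inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml
./generate_list.sh -i inventory/sample/inventory.ini
```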
## manage-offline-files.sh
This script will download all files according to `temp/files.list` and run an nginx container to serve them for offline download.
Step (1): generate `files.list`
```shell
./generate_list.sh
```
Step (2): download the files and run the nginx container
```shell
./manage-offline-files.sh
```
When the nginx container is running, it can be accessed at <http://127.0.0.1:8080/>.
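A quick smoke test of the file server (autoindex is enabled in the bundled nginx.conf):

```shell
curl http://127.0.0.1:8080/    # lists the mirrored download tree
```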

View file

@ -1 +0,0 @@
{ "insecure-registries":["HOSTNAME:5000"] }

View file

@ -1,33 +0,0 @@
#!/bin/bash
set -eo pipefail
CURRENT_DIR=$(cd $(dirname $0); pwd)
TEMP_DIR="${CURRENT_DIR}/temp"
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"
: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}
mkdir -p ${TEMP_DIR}
# generate all download files url template
grep 'download_url:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
| sed 's/^.*_url: //g;s/\"//g' > ${TEMP_DIR}/files.list.template
# generate all images list template
sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
| sed -n "s/repo: //p;s/tag: //p" | tr -d ' ' \
| sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template
# add kube-* images to images list template
# Those container images are downloaded by kubeadm, so roles/download/defaults/main.yml
# does not contain them. That is why they need to be added to the
# list separately.
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
for i in $KUBE_IMAGES; do
echo "{{ kube_image_repo }}/$i:{{ kube_version }}" >> ${TEMP_DIR}/images.list.template
done
# run ansible to expand templates
/bin/cp ${CURRENT_DIR}/generate_list.yml ${REPO_ROOT_DIR}
(cd ${REPO_ROOT_DIR} && ansible-playbook $* generate_list.yml && /bin/rm generate_list.yml) || exit 1

View file

@ -1,19 +0,0 @@
---
- hosts: localhost
become: no
roles:
# Just load default variables from roles.
- role: kubespray-defaults
when: false
- role: download
when: false
tasks:
# Generate files.list and images.list files from templates.
- template:
src: ./contrib/offline/temp/{{ item }}.list.template
dest: ./contrib/offline/temp/{{ item }}.list
with_items:
- files
- images

View file

@ -1,172 +0,0 @@
#!/bin/bash
OPTION=$1
CURRENT_DIR=$(cd $(dirname $0); pwd)
TEMP_DIR="${CURRENT_DIR}/temp"
IMAGE_TAR_FILE="${CURRENT_DIR}/container-images.tar.gz"
IMAGE_DIR="${CURRENT_DIR}/container-images"
IMAGE_LIST="${IMAGE_DIR}/container-images.txt"
RETRY_COUNT=5
function create_container_image_tar() {
set -e
IMAGES=$(kubectl describe pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq)
# NOTE: etcd and pause cannot be seen as pods.
# The pause image is used for --pod-infra-container-image option of kubelet.
EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|registry.k8s.io/pause:" | sed s@\"@@g)
IMAGES="${IMAGES} ${EXT_IMAGES}"
rm -f ${IMAGE_TAR_FILE}
rm -rf ${IMAGE_DIR}
mkdir ${IMAGE_DIR}
cd ${IMAGE_DIR}
sudo docker pull registry:latest
sudo docker save -o registry-latest.tar registry:latest
for image in ${IMAGES}
do
FILE_NAME="$(echo ${image} | sed s@"/"@"-"@g | sed s/":"/"-"/g)".tar
set +e
for step in $(seq 1 ${RETRY_COUNT})
do
sudo docker pull ${image}
if [ $? -eq 0 ]; then
break
fi
echo "Failed to pull ${image} at step ${step}"
if [ ${step} -eq ${RETRY_COUNT} ]; then
exit 1
fi
done
set -e
sudo docker save -o ${FILE_NAME} ${image}
# NOTE: This removes the following repo parts from each image
# so that these parts can be replaced by Kubespray.
# - kube_image_repo: "registry.k8s.io"
# - gcr_image_repo: "gcr.io"
# - docker_image_repo: "docker.io"
# - quay_image_repo: "quay.io"
FIRST_PART=$(echo ${image} | awk -F"/" '{print $1}')
if [ "${FIRST_PART}" = "registry.k8s.io" ] ||
[ "${FIRST_PART}" = "gcr.io" ] ||
[ "${FIRST_PART}" = "docker.io" ] ||
[ "${FIRST_PART}" = "quay.io" ] ||
[ "${FIRST_PART}" = "${PRIVATE_REGISTRY}" ]; then
image=$(echo ${image} | sed s@"${FIRST_PART}/"@@)
fi
echo "${FILE_NAME} ${image}" >> ${IMAGE_LIST}
done
cd ..
sudo chown ${USER} ${IMAGE_DIR}/*
tar -zcvf ${IMAGE_TAR_FILE} ./container-images
rm -rf ${IMAGE_DIR}
echo ""
echo "${IMAGE_TAR_FILE} is created to contain your container images."
echo "Please keep this file and bring it to your offline environment."
}
function register_container_images() {
if [ ! -f ${IMAGE_TAR_FILE} ]; then
echo "${IMAGE_TAR_FILE} should exist."
exit 1
fi
if [ ! -d ${TEMP_DIR} ]; then
mkdir ${TEMP_DIR}
fi
# To avoid "http: server gave http response to https client" error.
LOCALHOST_NAME=$(hostname)
if [ -d /etc/docker/ ]; then
set -e
# Ubuntu18.04, RHEL7/CentOS7
cp ${CURRENT_DIR}/docker-daemon.json ${TEMP_DIR}/docker-daemon.json
sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/docker-daemon.json
sudo cp ${TEMP_DIR}/docker-daemon.json /etc/docker/daemon.json
elif [ -d /etc/containers/ ]; then
set -e
# RHEL8/CentOS8
cp ${CURRENT_DIR}/registries.conf ${TEMP_DIR}/registries.conf
sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/registries.conf
sudo cp ${TEMP_DIR}/registries.conf /etc/containers/registries.conf
else
echo "docker package(docker-ce, etc.) should be installed"
exit 1
fi
tar -zxvf ${IMAGE_TAR_FILE}
sudo docker load -i ${IMAGE_DIR}/registry-latest.tar
set +e
sudo docker container inspect registry >/dev/null 2>&1
if [ $? -ne 0 ]; then
sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
fi
set -e
while read -r line; do
file_name=$(echo ${line} | awk '{print $1}')
raw_image=$(echo ${line} | awk '{print $2}')
new_image="${LOCALHOST_NAME}:5000/${raw_image}"
org_image=$(sudo docker load -i ${IMAGE_DIR}/${file_name} | head -n1 | awk '{print $3}')
image_id=$(sudo docker image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//)
if [ -z "${file_name}" ]; then
echo "Failed to get file_name for line ${line}"
exit 1
fi
if [ -z "${raw_image}" ]; then
echo "Failed to get raw_image for line ${line}"
exit 1
fi
if [ -z "${org_image}" ]; then
echo "Failed to get org_image for line ${line}"
exit 1
fi
if [ -z "${image_id}" ]; then
echo "Failed to get image_id for file ${file_name}"
exit 1
fi
sudo docker load -i ${IMAGE_DIR}/${file_name}
sudo docker tag ${image_id} ${new_image}
sudo docker push ${new_image}
done <<< "$(cat ${IMAGE_LIST})"
echo "Succeeded to register container images to local registry."
echo "Please specify ${LOCALHOST_NAME}:5000 for the following options in your inventry:"
echo "- kube_image_repo"
echo "- gcr_image_repo"
echo "- docker_image_repo"
echo "- quay_image_repo"
}
if [ "${OPTION}" == "create" ]; then
create_container_image_tar
elif [ "${OPTION}" == "register" ]; then
register_container_images
else
echo "This script has two features:"
echo "(1) Get container images from an environment which is deployed online."
echo "(2) Deploy local container registry and register the container images to the registry."
echo ""
echo "Step(1) should be done online site as a preparation, then we bring"
echo "the gotten images to the target offline environment. if images are from"
echo "a private registry, you need to set PRIVATE_REGISTRY environment variable."
echo "Then we will run step(2) for registering the images to local registry."
echo ""
echo "${IMAGE_TAR_FILE} is created to contain your container images."
echo "Please keep this file and bring it to your offline environment."
echo ""
echo "Step(1) can be operated with:"
echo " $ ./manage-offline-container-images.sh create"
echo ""
echo "Step(2) can be operated with:"
echo " $ ./manage-offline-container-images.sh register"
echo ""
echo "Please specify 'create' or 'register'."
echo ""
exit 1
fi
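For reference, a minimal sketch of the end-to-end workflow (the private-registry hostname is illustrative; the script prints the actual tar file name and the registry endpoint to use):

```bash
# Online host: pull all images and archive them into a tar file.
# PRIVATE_REGISTRY is only needed if some images come from a private registry.
PRIVATE_REGISTRY=registry.example.com ./manage-offline-container-images.sh create

# Offline host, after transferring the generated tar file here:
# load the images and push them into a local registry on port 5000.
./manage-offline-container-images.sh register
```

Afterwards, point `kube_image_repo`, `gcr_image_repo`, `docker_image_repo`, and `quay_image_repo` in your inventory at the `<hostname>:5000` endpoint the script reports.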


@ -1,44 +0,0 @@
#!/bin/bash
CURRENT_DIR=$( dirname "$(readlink -f "$0")" )
OFFLINE_FILES_DIR_NAME="offline-files"
OFFLINE_FILES_DIR="${CURRENT_DIR}/${OFFLINE_FILES_DIR_NAME}"
OFFLINE_FILES_ARCHIVE="${CURRENT_DIR}/offline-files.tar.gz"
FILES_LIST=${FILES_LIST:-"${CURRENT_DIR}/temp/files.list"}
NGINX_PORT=8080
# download files
if [ ! -f "${FILES_LIST}" ]; then
echo "${FILES_LIST} should exist, run ./generate_list.sh first."
exit 1
fi
rm -rf "${OFFLINE_FILES_DIR}"
rm "${OFFLINE_FILES_ARCHIVE}"
mkdir "${OFFLINE_FILES_DIR}"
wget -x -P "${OFFLINE_FILES_DIR}" -i "${FILES_LIST}"
tar -czvf "${OFFLINE_FILES_ARCHIVE}" "${OFFLINE_FILES_DIR_NAME}"
[ -n "$NO_HTTP_SERVER" ] && echo "skip to run nginx" && exit 0
# run nginx container server
if command -v nerdctl 1>/dev/null 2>&1; then
runtime="nerdctl"
elif command -v podman 1>/dev/null 2>&1; then
runtime="podman"
elif command -v docker 1>/dev/null 2>&1; then
runtime="docker"
else
echo "No supported container runtime found"
exit 1
fi
sudo "${runtime}" container inspect nginx >/dev/null 2>&1
if [ $? -ne 0 ]; then
sudo "${runtime}" run \
--restart=always -d -p ${NGINX_PORT}:80 \
--volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \
--volume "$(pwd)"/nginx.conf:/etc/nginx/nginx.conf \
--name nginx nginx:alpine
fi
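Once the container is up, a quick sanity check (assuming the default `NGINX_PORT` of 8080) is to fetch the autoindex page:

```bash
# nginx serves the mirrored files with autoindex enabled, so this lists them.
curl http://localhost:8080/
```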


@ -1,39 +0,0 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
default_type application/octet-stream;
include /etc/nginx/conf.d/*.conf;
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
include /etc/nginx/default.d/*.conf;
location / {
root /usr/share/nginx/html/download;
autoindex on;
autoindex_exact_size off;
autoindex_localtime on;
}
error_page 404 /404.html;
location = /40x.html {
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
}
}
}


@ -1,8 +0,0 @@
[registries.search]
registries = ['registry.access.redhat.com', 'registry.redhat.io', 'docker.io']
[registries.insecure]
registries = ['HOSTNAME:5000']
[registries.block]
registries = []


@ -1,2 +0,0 @@
---
disable_service_firewall: false


@ -1,23 +0,0 @@
---
- block:
- name: List services
service_facts:
- name: Disable service firewalld
systemd:
name: firewalld
state: stopped
enabled: no
when:
"'firewalld.service' in services"
- name: Disable service ufw
systemd:
name: ufw
state: stopped
enabled: no
when:
"'ufw.service' in services"
when:
- disable_service_firewall is defined and disable_service_firewall
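A minimal sketch of enabling this toggle for a run, assuming the variable is honored by the main playbooks (the inventory path is illustrative):

```bash
# Stop and disable firewalld/ufw on the target hosts as part of the play.
ansible-playbook -i inventory/mycluster/hosts.yml cluster.yml -b \
  -e disable_service_firewall=true
```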


@ -9,8 +9,8 @@ Summary: Ansible modules for installing Kubernetes
Group: System Environment/Libraries Group: System Environment/Libraries
License: ASL 2.0 License: ASL 2.0
Url: https://github.com/kubernetes-sigs/kubespray Url: https://github.com/kubernetes-incubator/kubespray
Source0: https://github.com/kubernetes-sigs/kubespray/archive/%{upstream_version}.tar.gz#/%{name}-%{release}.tar.gz Source0: https://github.com/kubernetes-incubator/kubespray/archive/%{upstream_version}.tar.gz#/%{name}-%{release}.tar.gz
BuildArch: noarch BuildArch: noarch
BuildRequires: git BuildRequires: git
@ -51,7 +51,7 @@ export SKIP_PIP_INSTALL=1
%doc %{_docdir}/%{name}/inventory/sample/hosts.ini %doc %{_docdir}/%{name}/inventory/sample/hosts.ini
%config %{_sysconfdir}/%{name}/ansible.cfg %config %{_sysconfdir}/%{name}/ansible.cfg
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml %config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s_cluster.yml %config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
%license %{_docdir}/%{name}/LICENSE %license %{_docdir}/%{name}/LICENSE
%{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info %{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
%{_datarootdir}/%{name}/roles/ %{_datarootdir}/%{name}/roles/


@ -1,3 +1,2 @@
*.tfstate* *.tfstate*
.terraform.lock.hcl
.terraform .terraform


@ -1,43 +1,40 @@
# Kubernetes on AWS with Terraform ## Kubernetes on AWS with Terraform
## Overview **Overview:**
This project will create: This project will create:
* VPC with Public and Private Subnets in # Availability Zones
* Bastion Hosts and NAT Gateways in the Public Subnet
* A dynamic number of masters, etcd, and worker nodes in the Private Subnet
* evenly distributed over the # of Availability Zones
* AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet
- VPC with Public and Private Subnets in # Availability Zones **Requirements**
- Bastion Hosts and NAT Gateways in the Public Subnet
- A dynamic number of masters, etcd, and worker nodes in the Private Subnet
- evenly distributed over the # of Availability Zones
- AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet
## Requirements
- Terraform 0.12.0 or newer - Terraform 0.12.0 or newer
## How to Use **How to Use:**
- Export the variables for your AWS credentials or edit `credentials.tfvars`: - Export the variables for your AWS credentials or edit `credentials.tfvars`:
```commandline ```
export TF_VAR_AWS_ACCESS_KEY_ID="www" export TF_VAR_AWS_ACCESS_KEY_ID="www"
export TF_VAR_AWS_SECRET_ACCESS_KEY ="xxx" export TF_VAR_AWS_SECRET_ACCESS_KEY ="xxx"
export TF_VAR_AWS_SSH_KEY_NAME="yyy" export TF_VAR_AWS_SSH_KEY_NAME="yyy"
export TF_VAR_AWS_DEFAULT_REGION="zzz" export TF_VAR_AWS_DEFAULT_REGION="zzz"
``` ```
- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use Ubuntu 18.04 LTS (Bionic) as base image. If you want to change this behaviour, see note "Using other distrib than Ubuntu" below. - Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use Ubuntu 18.04 LTS (Bionic) as base image. If you want to change this behaviour, see note "Using other distrib than Ubuntu" below.
- Create an AWS EC2 SSH Key - Create an AWS EC2 SSH Key
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials - Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
Example: Example:
```commandline ```commandline
terraform apply -var-file=credentials.tfvars terraform apply -var-file=credentials.tfvars
``` ```
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory` - Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated `ssh-bastion.conf`. Ansible automatically detects bastion and changes `ssh_args`
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated ssh-bastion.conf.
Ansible automatically detects bastion and changes ssh_args
```commandline ```commandline
ssh -F ./ssh-bastion.conf user@$ip ssh -F ./ssh-bastion.conf user@$ip
``` ```
@ -45,19 +42,14 @@ ssh -F ./ssh-bastion.conf user@$ip
- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag. - Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.
Example (this one assumes you are using Ubuntu) Example (this one assumes you are using Ubuntu)
```commandline ```commandline
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=ubuntu -b --become-user=root --flush-cache ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=ubuntu -b --become-user=root --flush-cache
``` ```
***Using other distrib than Ubuntu*** ***Using other distrib than Ubuntu***
If you want to use a distribution other than Ubuntu 18.04 (Bionic) LTS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf. If you want to use a distribution other than Ubuntu 18.04 (Bionic) LTS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
For example, to use: For example, to use:
- Debian Jessie, replace 'data "aws_ami" "distro"' in variables.tf with - Debian Jessie, replace 'data "aws_ami" "distro"' in variables.tf with
```ini
data "aws_ami" "distro" { data "aws_ami" "distro" {
most_recent = true most_recent = true
@ -73,11 +65,8 @@ data "aws_ami" "distro" {
owners = ["379101102735"] owners = ["379101102735"]
} }
```
- Ubuntu 16.04, replace 'data "aws_ami" "distro"' in variables.tf with - Ubuntu 16.04, replace 'data "aws_ami" "distro"' in variables.tf with
```ini
data "aws_ami" "distro" { data "aws_ami" "distro" {
most_recent = true most_recent = true
@ -93,11 +82,8 @@ data "aws_ami" "distro" {
owners = ["099720109477"] owners = ["099720109477"]
} }
```
- Centos 7, replace 'data "aws_ami" "distro"' in variables.tf with - Centos 7, replace 'data "aws_ami" "distro"' in variables.tf with
```ini
data "aws_ami" "distro" { data "aws_ami" "distro" {
most_recent = true most_recent = true
@ -113,49 +99,23 @@ data "aws_ami" "distro" {
owners = ["688023202711"] owners = ["688023202711"]
} }
```
## Connecting to Kubernetes **Troubleshooting**
You can use the following set of commands to get the kubeconfig file from your newly created cluster. Before running the commands, make sure you are in the project's root folder. ***Remaining AWS IAM Instance Profile***:
```commandline
# Get the controller's IP address.
CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube_control_plane\]" -A 1 | tail -n 1)
CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2)
# Get the hostname of the load balancer.
LB_HOST=$(cat inventory/hosts | grep apiserver_loadbalancer_domain_name | cut -d'"' -f2)
# Get the controller's SSH fingerprint.
ssh-keygen -R $CONTROLLER_IP > /dev/null 2>&1
ssh-keyscan -H $CONTROLLER_IP >> ~/.ssh/known_hosts 2>/dev/null
# Get the kubeconfig from the controller.
mkdir -p ~/.kube
ssh -F ssh-bastion.conf centos@$CONTROLLER_IP "sudo chmod 644 /etc/kubernetes/admin.conf"
scp -F ssh-bastion.conf centos@$CONTROLLER_IP:/etc/kubernetes/admin.conf ~/.kube/config
sed -i "s^server:.*^server: https://$LB_HOST:6443^" ~/.kube/config
kubectl get nodes
```
## Troubleshooting
### Remaining AWS IAM Instance Profile
If the cluster was destroyed without using Terraform it is possible that If the cluster was destroyed without using Terraform it is possible that
the AWS IAM Instance Profiles still remain. To delete them you can use the AWS IAM Instance Profiles still remain. To delete them you can use
the `AWS CLI` with the following command: the `AWS CLI` with the following command:
```
```commandline
aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name> aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
``` ```
### Ansible Inventory doesn't get created ***Ansible Inventory doesn't get created:***
It could happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case, copy the output after `inventory=`, create a file named `hosts` in the directory `inventory`, and paste the inventory into the file. It could happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case, copy the output after `inventory=`, create a file named `hosts` in the directory `inventory`, and paste the inventory into the file.
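Alternatively, since the configuration exposes the rendered inventory as a Terraform output named `inventory`, you can re-render it by hand; a minimal sketch (on newer Terraform versions a `-raw` flag may be needed):

```bash
# Write the rendered Ansible inventory from the Terraform state to the expected path.
terraform output inventory > ./inventory/hosts
```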
## Architecture **Architecture**
Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones. Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones.


@ -20,20 +20,20 @@ module "aws-vpc" {
aws_cluster_name = var.aws_cluster_name aws_cluster_name = var.aws_cluster_name
aws_vpc_cidr_block = var.aws_vpc_cidr_block aws_vpc_cidr_block = var.aws_vpc_cidr_block
aws_avail_zones = data.aws_availability_zones.available.names aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, 2)
aws_cidr_subnets_private = var.aws_cidr_subnets_private aws_cidr_subnets_private = var.aws_cidr_subnets_private
aws_cidr_subnets_public = var.aws_cidr_subnets_public aws_cidr_subnets_public = var.aws_cidr_subnets_public
default_tags = var.default_tags default_tags = var.default_tags
} }
module "aws-nlb" { module "aws-elb" {
source = "./modules/nlb" source = "./modules/elb"
aws_cluster_name = var.aws_cluster_name aws_cluster_name = var.aws_cluster_name
aws_vpc_id = module.aws-vpc.aws_vpc_id aws_vpc_id = module.aws-vpc.aws_vpc_id
aws_avail_zones = data.aws_availability_zones.available.names aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, 2)
aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public
aws_nlb_api_port = var.aws_nlb_api_port aws_elb_api_port = var.aws_elb_api_port
k8s_secure_api_port = var.k8s_secure_api_port k8s_secure_api_port = var.k8s_secure_api_port
default_tags = var.default_tags default_tags = var.default_tags
} }
@ -52,19 +52,20 @@ module "aws-iam" {
resource "aws_instance" "bastion-server" { resource "aws_instance" "bastion-server" {
ami = data.aws_ami.distro.id ami = data.aws_ami.distro.id
instance_type = var.aws_bastion_size instance_type = var.aws_bastion_size
count = var.aws_bastion_num count = length(var.aws_cidr_subnets_public)
associate_public_ip_address = true associate_public_ip_address = true
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_public, count.index) subnet_id = element(module.aws-vpc.aws_subnet_ids_public, count.index)
vpc_security_group_ids = module.aws-vpc.aws_security_group vpc_security_group_ids = module.aws-vpc.aws_security_group
key_name = var.AWS_SSH_KEY_NAME key_name = var.AWS_SSH_KEY_NAME
tags = merge(var.default_tags, tomap({ tags = merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}" "Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
Cluster = var.aws_cluster_name "Cluster", "${var.aws_cluster_name}",
Role = "bastion-${var.aws_cluster_name}-${count.index}" "Role", "bastion-${var.aws_cluster_name}-${count.index}"
})) ))
} }
/* /*
@ -78,28 +79,25 @@ resource "aws_instance" "k8s-master" {
count = var.aws_kube_master_num count = var.aws_kube_master_num
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index) subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
vpc_security_group_ids = module.aws-vpc.aws_security_group vpc_security_group_ids = module.aws-vpc.aws_security_group
root_block_device { iam_instance_profile = module.aws-iam.kube-master-profile
volume_size = var.aws_kube_master_disk_size
}
iam_instance_profile = module.aws-iam.kube_control_plane-profile
key_name = var.AWS_SSH_KEY_NAME key_name = var.AWS_SSH_KEY_NAME
tags = merge(var.default_tags, tomap({ tags = merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-master${count.index}" "Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member" "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
Role = "master" "Role", "master"
})) ))
} }
resource "aws_lb_target_group_attachment" "tg-attach_master_nodes" { resource "aws_elb_attachment" "attach_master_nodes" {
count = var.aws_kube_master_num count = var.aws_kube_master_num
target_group_arn = module.aws-nlb.aws_nlb_api_tg_arn elb = module.aws-elb.aws_elb_api_id
target_id = element(aws_instance.k8s-master.*.private_ip, count.index) instance = element(aws_instance.k8s-master.*.id, count.index)
} }
resource "aws_instance" "k8s-etcd" { resource "aws_instance" "k8s-etcd" {
@ -108,21 +106,18 @@ resource "aws_instance" "k8s-etcd" {
count = var.aws_etcd_num count = var.aws_etcd_num
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index) subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
vpc_security_group_ids = module.aws-vpc.aws_security_group vpc_security_group_ids = module.aws-vpc.aws_security_group
root_block_device {
volume_size = var.aws_etcd_disk_size
}
key_name = var.AWS_SSH_KEY_NAME key_name = var.AWS_SSH_KEY_NAME
tags = merge(var.default_tags, tomap({ tags = merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}" "Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member" "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
Role = "etcd" "Role", "etcd"
})) ))
} }
resource "aws_instance" "k8s-worker" { resource "aws_instance" "k8s-worker" {
@ -131,22 +126,19 @@ resource "aws_instance" "k8s-worker" {
count = var.aws_kube_worker_num count = var.aws_kube_worker_num
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index) subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
vpc_security_group_ids = module.aws-vpc.aws_security_group vpc_security_group_ids = module.aws-vpc.aws_security_group
root_block_device {
volume_size = var.aws_kube_worker_disk_size
}
iam_instance_profile = module.aws-iam.kube-worker-profile iam_instance_profile = module.aws-iam.kube-worker-profile
key_name = var.AWS_SSH_KEY_NAME key_name = var.AWS_SSH_KEY_NAME
tags = merge(var.default_tags, tomap({ tags = merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}" "Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member" "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
Role = "worker" "Role", "worker"
})) ))
} }
/* /*
@ -160,11 +152,11 @@ data "template_file" "inventory" {
public_ip_address_bastion = join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip)) public_ip_address_bastion = join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))
connection_strings_master = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip)) connection_strings_master = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip))
connection_strings_node = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip)) connection_strings_node = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip))
connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))
list_master = join("\n", aws_instance.k8s-master.*.private_dns) list_master = join("\n", aws_instance.k8s-master.*.private_dns)
list_node = join("\n", aws_instance.k8s-worker.*.private_dns) list_node = join("\n", aws_instance.k8s-worker.*.private_dns)
connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip)) list_etcd = join("\n", aws_instance.k8s-etcd.*.private_dns)
list_etcd = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_dns) : (aws_instance.k8s-master.*.private_dns))) elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
nlb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-nlb.aws_nlb_api_fqdn}\""
} }
} }


@ -0,0 +1,57 @@
resource "aws_security_group" "aws-elb" {
name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
vpc_id = var.aws_vpc_id
tags = merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
))
}
resource "aws_security_group_rule" "aws-allow-api-access" {
type = "ingress"
from_port = var.aws_elb_api_port
to_port = var.k8s_secure_api_port
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.aws-elb.id
}
resource "aws_security_group_rule" "aws-allow-api-egress" {
type = "egress"
from_port = 0
to_port = 65535
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.aws-elb.id
}
# Create a new AWS ELB for K8S API
resource "aws_elb" "aws-elb-api" {
name = "kubernetes-elb-${var.aws_cluster_name}"
subnets = var.aws_subnet_ids_public
security_groups = [aws_security_group.aws-elb.id]
listener {
instance_port = var.k8s_secure_api_port
instance_protocol = "tcp"
lb_port = var.aws_elb_api_port
lb_protocol = "tcp"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "TCP:${var.k8s_secure_api_port}"
interval = 30
}
cross_zone_load_balancing = true
idle_timeout = 400
connection_draining = true
connection_draining_timeout = 400
tags = merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-elb-api"
))
}


@ -0,0 +1,7 @@
output "aws_elb_api_id" {
value = aws_elb.aws-elb-api.id
}
output "aws_elb_api_fqdn" {
value = aws_elb.aws-elb-api.dns_name
}


@ -6,8 +6,8 @@ variable "aws_vpc_id" {
description = "AWS VPC ID" description = "AWS VPC ID"
} }
variable "aws_nlb_api_port" { variable "aws_elb_api_port" {
description = "Port for AWS NLB" description = "Port for AWS ELB"
} }
variable "k8s_secure_api_port" { variable "k8s_secure_api_port" {
@ -16,15 +16,15 @@ variable "k8s_secure_api_port" {
variable "aws_avail_zones" { variable "aws_avail_zones" {
description = "Availability Zones Used" description = "Availability Zones Used"
type = list(string) type = "list"
} }
variable "aws_subnet_ids_public" { variable "aws_subnet_ids_public" {
description = "IDs of Public Subnets" description = "IDs of Public Subnets"
type = list(string) type = "list"
} }
variable "default_tags" { variable "default_tags" {
description = "Tags for all resources" description = "Tags for all resources"
type = map(string) type = "map"
} }


@ -1,6 +1,6 @@
#Add AWS Roles for Kubernetes #Add AWS Roles for Kubernetes
resource "aws_iam_role" "kube_control_plane" { resource "aws_iam_role" "kube-master" {
name = "kubernetes-${var.aws_cluster_name}-master" name = "kubernetes-${var.aws_cluster_name}-master"
assume_role_policy = <<EOF assume_role_policy = <<EOF
@ -40,9 +40,9 @@ EOF
#Add AWS Policies for Kubernetes #Add AWS Policies for Kubernetes
resource "aws_iam_role_policy" "kube_control_plane" { resource "aws_iam_role_policy" "kube-master" {
name = "kubernetes-${var.aws_cluster_name}-master" name = "kubernetes-${var.aws_cluster_name}-master"
role = aws_iam_role.kube_control_plane.id role = aws_iam_role.kube-master.id
policy = <<EOF policy = <<EOF
{ {
@ -130,9 +130,9 @@ EOF
#Create AWS Instance Profiles #Create AWS Instance Profiles
resource "aws_iam_instance_profile" "kube_control_plane" { resource "aws_iam_instance_profile" "kube-master" {
name = "kube_${var.aws_cluster_name}_master_profile" name = "kube_${var.aws_cluster_name}_master_profile"
role = aws_iam_role.kube_control_plane.name role = aws_iam_role.kube-master.name
} }
resource "aws_iam_instance_profile" "kube-worker" { resource "aws_iam_instance_profile" "kube-worker" {


@ -1,5 +1,5 @@
output "kube_control_plane-profile" { output "kube-master-profile" {
value = aws_iam_instance_profile.kube_control_plane.name value = aws_iam_instance_profile.kube-master.name
} }
output "kube-worker-profile" { output "kube-worker-profile" {


@ -1,41 +0,0 @@
# Create a new AWS NLB for K8S API
resource "aws_lb" "aws-nlb-api" {
name = "kubernetes-nlb-${var.aws_cluster_name}"
load_balancer_type = "network"
subnets = length(var.aws_subnet_ids_public) <= length(var.aws_avail_zones) ? var.aws_subnet_ids_public : slice(var.aws_subnet_ids_public, 0, length(var.aws_avail_zones))
idle_timeout = 400
enable_cross_zone_load_balancing = true
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-nlb-api"
}))
}
# Create a new AWS NLB Instance Target Group
resource "aws_lb_target_group" "aws-nlb-api-tg" {
name = "kubernetes-nlb-tg-${var.aws_cluster_name}"
port = var.k8s_secure_api_port
protocol = "TCP"
target_type = "ip"
vpc_id = var.aws_vpc_id
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
interval = 30
protocol = "HTTPS"
path = "/healthz"
}
}
# Create a new AWS NLB Listener listen to target group
resource "aws_lb_listener" "aws-nlb-api-listener" {
load_balancer_arn = aws_lb.aws-nlb-api.arn
port = var.aws_nlb_api_port
protocol = "TCP"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.aws-nlb-api-tg.arn
}
}


@ -1,11 +0,0 @@
output "aws_nlb_api_id" {
value = aws_lb.aws-nlb-api.id
}
output "aws_nlb_api_fqdn" {
value = aws_lb.aws-nlb-api.dns_name
}
output "aws_nlb_api_tg_arn" {
value = aws_lb_target_group.aws-nlb-api-tg.arn
}


@ -5,9 +5,9 @@ resource "aws_vpc" "cluster-vpc" {
enable_dns_support = true enable_dns_support = true
enable_dns_hostnames = true enable_dns_hostnames = true
tags = merge(var.default_tags, tomap({ tags = merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-vpc" "Name", "kubernetes-${var.aws_cluster_name}-vpc"
})) ))
} }
resource "aws_eip" "cluster-nat-eip" { resource "aws_eip" "cluster-nat-eip" {
@ -18,22 +18,21 @@ resource "aws_eip" "cluster-nat-eip" {
resource "aws_internet_gateway" "cluster-vpc-internetgw" { resource "aws_internet_gateway" "cluster-vpc-internetgw" {
vpc_id = aws_vpc.cluster-vpc.id vpc_id = aws_vpc.cluster-vpc.id
tags = merge(var.default_tags, tomap({ tags = merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-internetgw" "Name", "kubernetes-${var.aws_cluster_name}-internetgw"
})) ))
} }
resource "aws_subnet" "cluster-vpc-subnets-public" { resource "aws_subnet" "cluster-vpc-subnets-public" {
vpc_id = aws_vpc.cluster-vpc.id vpc_id = aws_vpc.cluster-vpc.id
count = length(var.aws_cidr_subnets_public) count = length(var.aws_avail_zones)
availability_zone = element(var.aws_avail_zones, count.index % length(var.aws_avail_zones)) availability_zone = element(var.aws_avail_zones, count.index)
cidr_block = element(var.aws_cidr_subnets_public, count.index) cidr_block = element(var.aws_cidr_subnets_public, count.index)
tags = merge(var.default_tags, tomap({ tags = merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public" "Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
"kubernetes.io/cluster/${var.aws_cluster_name}" = "shared" "kubernetes.io/cluster/${var.aws_cluster_name}", "member"
"kubernetes.io/role/elb" = "1" ))
}))
} }
resource "aws_nat_gateway" "cluster-nat-gateway" { resource "aws_nat_gateway" "cluster-nat-gateway" {
@ -44,15 +43,13 @@ resource "aws_nat_gateway" "cluster-nat-gateway" {
resource "aws_subnet" "cluster-vpc-subnets-private" { resource "aws_subnet" "cluster-vpc-subnets-private" {
vpc_id = aws_vpc.cluster-vpc.id vpc_id = aws_vpc.cluster-vpc.id
count = length(var.aws_cidr_subnets_private) count = length(var.aws_avail_zones)
availability_zone = element(var.aws_avail_zones, count.index % length(var.aws_avail_zones)) availability_zone = element(var.aws_avail_zones, count.index)
cidr_block = element(var.aws_cidr_subnets_private, count.index) cidr_block = element(var.aws_cidr_subnets_private, count.index)
tags = merge(var.default_tags, tomap({ tags = merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private" "Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
"kubernetes.io/cluster/${var.aws_cluster_name}" = "shared" ))
"kubernetes.io/role/internal-elb" = "1"
}))
} }
#Routing in VPC #Routing in VPC
@ -67,9 +64,9 @@ resource "aws_route_table" "kubernetes-public" {
gateway_id = aws_internet_gateway.cluster-vpc-internetgw.id gateway_id = aws_internet_gateway.cluster-vpc-internetgw.id
} }
tags = merge(var.default_tags, tomap({ tags = merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-routetable-public" "Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
})) ))
} }
resource "aws_route_table" "kubernetes-private" { resource "aws_route_table" "kubernetes-private" {
@ -81,9 +78,9 @@ resource "aws_route_table" "kubernetes-private" {
nat_gateway_id = element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index) nat_gateway_id = element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)
} }
tags = merge(var.default_tags, tomap({ tags = merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}" "Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
})) ))
} }
resource "aws_route_table_association" "kubernetes-public" { resource "aws_route_table_association" "kubernetes-public" {
@ -104,9 +101,9 @@ resource "aws_security_group" "kubernetes" {
name = "kubernetes-${var.aws_cluster_name}-securitygroup" name = "kubernetes-${var.aws_cluster_name}-securitygroup"
vpc_id = aws_vpc.cluster-vpc.id vpc_id = aws_vpc.cluster-vpc.id
tags = merge(var.default_tags, tomap({ tags = merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-securitygroup" "Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
})) ))
} }
resource "aws_security_group_rule" "allow-all-ingress" { resource "aws_security_group_rule" "allow-all-ingress" {


@ -8,20 +8,20 @@ variable "aws_cluster_name" {
variable "aws_avail_zones" { variable "aws_avail_zones" {
description = "AWS Availability Zones Used" description = "AWS Availability Zones Used"
type = list(string) type = "list"
} }
variable "aws_cidr_subnets_private" { variable "aws_cidr_subnets_private" {
description = "CIDR Blocks for private subnets in Availability zones" description = "CIDR Blocks for private subnets in Availability zones"
type = list(string) type = "list"
} }
variable "aws_cidr_subnets_public" { variable "aws_cidr_subnets_public" {
description = "CIDR Blocks for public subnets in Availability zones" description = "CIDR Blocks for public subnets in Availability zones"
type = list(string) type = "list"
} }
variable "default_tags" { variable "default_tags" {
description = "Default tags for all resources" description = "Default tags for all resources"
type = map(string) type = "map"
} }


@ -11,11 +11,11 @@ output "workers" {
} }
output "etcd" { output "etcd" {
value = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_ip) : (aws_instance.k8s-master.*.private_ip))) value = join("\n", aws_instance.k8s-etcd.*.private_ip)
} }
output "aws_nlb_api_fqdn" { output "aws_elb_api_fqdn" {
value = "${module.aws-nlb.aws_nlb_api_fqdn}:${var.aws_nlb_api_port}" value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
} }
output "inventory" { output "inventory" {


@ -9,8 +9,6 @@ aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"] aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]
#Bastion Host #Bastion Host
aws_bastion_num = 1
aws_bastion_size = "t2.medium" aws_bastion_size = "t2.medium"
#Kubernetes Cluster #Kubernetes Cluster
@ -19,26 +17,22 @@ aws_kube_master_num = 3
aws_kube_master_size = "t2.medium" aws_kube_master_size = "t2.medium"
aws_kube_master_disk_size = 50
aws_etcd_num = 3 aws_etcd_num = 3
aws_etcd_size = "t2.medium" aws_etcd_size = "t2.medium"
aws_etcd_disk_size = 50
aws_kube_worker_num = 4 aws_kube_worker_num = 4
aws_kube_worker_size = "t2.medium" aws_kube_worker_size = "t2.medium"
aws_kube_worker_disk_size = 50 #Settings AWS ELB
#Settings AWS NLB aws_elb_api_port = 6443
aws_nlb_api_port = 6443
k8s_secure_api_port = 6443 k8s_secure_api_port = 6443
kube_insecure_apiserver_address = "0.0.0.0"
default_tags = { default_tags = {
# Env = "devtest" # Product = "kubernetes" # Env = "devtest" # Product = "kubernetes"
} }


@ -7,21 +7,22 @@ ${public_ip_address_bastion}
[bastion] [bastion]
${public_ip_address_bastion} ${public_ip_address_bastion}
[kube_control_plane] [kube-master]
${list_master} ${list_master}
[kube_node]
[kube-node]
${list_node} ${list_node}
[etcd] [etcd]
${list_etcd} ${list_etcd}
[calico_rr]
[k8s_cluster:children] [k8s-cluster:children]
kube_node kube-node
kube_control_plane kube-master
calico_rr
[k8s_cluster:vars]
${nlb_api_fqdn} [k8s-cluster:vars]
${elb_api_fqdn}


@ -6,34 +6,26 @@ aws_vpc_cidr_block = "10.250.192.0/18"
aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"] aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"] aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]
# single AZ deployment
#aws_cidr_subnets_private = ["10.250.192.0/20"]
#aws_cidr_subnets_public = ["10.250.224.0/20"]
# 3+ AZ deployment
#aws_cidr_subnets_private = ["10.250.192.0/24","10.250.193.0/24","10.250.194.0/24","10.250.195.0/24"]
#aws_cidr_subnets_public = ["10.250.224.0/24","10.250.225.0/24","10.250.226.0/24","10.250.227.0/24"]
#Bastion Host #Bastion Host
aws_bastion_num = 1 aws_bastion_size = "t2.medium"
aws_bastion_size = "t3.small"
#Kubernetes Cluster #Kubernetes Cluster
aws_kube_master_num = 3
aws_kube_master_size = "t3.medium"
aws_kube_master_disk_size = 50
aws_etcd_num = 0 aws_kube_master_num = 3
aws_etcd_size = "t3.medium" aws_kube_master_size = "t2.medium"
aws_etcd_disk_size = 50
aws_etcd_num = 3
aws_etcd_size = "t2.medium"
aws_kube_worker_num = 4 aws_kube_worker_num = 4
aws_kube_worker_size = "t3.medium" aws_kube_worker_size = "t2.medium"
aws_kube_worker_disk_size = 50
#Settings AWS ELB #Settings AWS ELB
aws_nlb_api_port = 6443
aws_elb_api_port = 6443
k8s_secure_api_port = 6443 k8s_secure_api_port = 6443
kube_insecure_apiserver_address = "0.0.0.0"
default_tags = { default_tags = {
# Env = "devtest" # Env = "devtest"


@ -8,26 +8,25 @@ aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
aws_avail_zones = ["eu-central-1a","eu-central-1b"] aws_avail_zones = ["eu-central-1a","eu-central-1b"]
#Bastion Host #Bastion Host
aws_bastion_num = 1 aws_bastion_ami = "ami-5900cc36"
aws_bastion_size = "t3.small" aws_bastion_size = "t2.small"
#Kubernetes Cluster #Kubernetes Cluster
aws_kube_master_num = 3 aws_kube_master_num = 3
aws_kube_master_size = "t3.medium" aws_kube_master_size = "t2.medium"
aws_kube_master_disk_size = 50
aws_etcd_num = 3 aws_etcd_num = 3
aws_etcd_size = "t3.medium" aws_etcd_size = "t2.medium"
aws_etcd_disk_size = 50
aws_kube_worker_num = 4 aws_kube_worker_num = 4
aws_kube_worker_size = "t3.medium" aws_kube_worker_size = "t2.medium"
aws_kube_worker_disk_size = 50
aws_cluster_ami = "ami-903df7ff"
#Settings AWS ELB #Settings AWS ELB
aws_nlb_api_port = 6443
aws_elb_api_port = 6443
k8s_secure_api_port = 6443 k8s_secure_api_port = 6443
kube_insecure_apiserver_address = 0.0.0.0
default_tags = { }
inventory_file = "../../../inventory/hosts"


@ -25,7 +25,7 @@ data "aws_ami" "distro" {
filter { filter {
name = "name" name = "name"
values = ["debian-10-amd64-*"] values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"]
} }
filter { filter {
@ -33,7 +33,7 @@ data "aws_ami" "distro" {
values = ["hvm"] values = ["hvm"]
} }
owners = ["136693071363"] # Debian-10 owners = ["099720109477"] # Canonical
} }
//AWS VPC Variables //AWS VPC Variables
@ -44,12 +44,12 @@ variable "aws_vpc_cidr_block" {
variable "aws_cidr_subnets_private" { variable "aws_cidr_subnets_private" {
description = "CIDR Blocks for private subnets in Availability Zones" description = "CIDR Blocks for private subnets in Availability Zones"
type = list(string) type = "list"
} }
variable "aws_cidr_subnets_public" { variable "aws_cidr_subnets_public" {
description = "CIDR Blocks for public subnets in Availability Zones" description = "CIDR Blocks for public subnets in Availability Zones"
type = list(string) type = "list"
} }
//AWS EC2 Settings //AWS EC2 Settings
@ -63,18 +63,10 @@ variable "aws_bastion_size" {
* The number should be divisible by the number of used * The number should be divisible by the number of used
* AWS Availability Zones without a remainder. * AWS Availability Zones without a remainder.
*/ */
variable "aws_bastion_num" {
description = "Number of Bastion Nodes"
}
variable "aws_kube_master_num" { variable "aws_kube_master_num" {
description = "Number of Kubernetes Master Nodes" description = "Number of Kubernetes Master Nodes"
} }
variable "aws_kube_master_disk_size" {
description = "Disk size for Kubernetes Master Nodes (in GiB)"
}
variable "aws_kube_master_size" { variable "aws_kube_master_size" {
description = "Instance size of Kube Master Nodes" description = "Instance size of Kube Master Nodes"
} }
@ -83,10 +75,6 @@ variable "aws_etcd_num" {
description = "Number of etcd Nodes" description = "Number of etcd Nodes"
} }
variable "aws_etcd_disk_size" {
description = "Disk size for etcd Nodes (in GiB)"
}
variable "aws_etcd_size" { variable "aws_etcd_size" {
description = "Instance size of etcd Nodes" description = "Instance size of etcd Nodes"
} }
@ -95,20 +83,16 @@ variable "aws_kube_worker_num" {
description = "Number of Kubernetes Worker Nodes" description = "Number of Kubernetes Worker Nodes"
} }
variable "aws_kube_worker_disk_size" {
description = "Disk size for Kubernetes Worker Nodes (in GiB)"
}
variable "aws_kube_worker_size" { variable "aws_kube_worker_size" {
description = "Instance size of Kubernetes Worker Nodes" description = "Instance size of Kubernetes Worker Nodes"
} }
/* /*
* AWS NLB Settings * AWS ELB Settings
* *
*/ */
variable "aws_nlb_api_port" { variable "aws_elb_api_port" {
description = "Port for AWS NLB" description = "Port for AWS ELB"
} }
variable "k8s_secure_api_port" { variable "k8s_secure_api_port" {
@ -117,7 +101,7 @@ variable "k8s_secure_api_port" {
variable "default_tags" { variable "default_tags" {
description = "Default tags for all resources" description = "Default tags for all resources"
type = map(string) type = "map"
} }
variable "inventory_file" { variable "inventory_file" {


@ -1,152 +0,0 @@
# Kubernetes on Exoscale with Terraform
Provision a Kubernetes cluster on [Exoscale](https://www.exoscale.com/) using Terraform and Kubespray
## Overview
The setup looks like following
```text
Kubernetes cluster
+-----------------------+
+---------------+ | +--------------+ |
| | | | +--------------+ |
| API server LB +---------> | | | |
| | | | | Master/etcd | |
+---------------+ | | | node(s) | |
| +-+ | |
| +--------------+ |
| ^ |
| | |
| v |
+---------------+ | +--------------+ |
| | | | +--------------+ |
| Ingress LB +---------> | | | |
| | | | | Worker | |
+---------------+ | | | node(s) | |
| +-+ | |
| +--------------+ |
+-----------------------+
```
## Requirements
* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)
## Quickstart
NOTE: *Assumes you are at the root of the kubespray repo*
Copy the sample inventory for your cluster and copy the default terraform variables.
```bash
CLUSTER=my-exoscale-cluster
cp -r inventory/sample inventory/$CLUSTER
cp contrib/terraform/exoscale/default.tfvars inventory/$CLUSTER/
cd inventory/$CLUSTER
```
Edit `default.tfvars` to match your setup. You MUST, at the very least, change `ssh_public_keys`.
```bash
# Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc.
$EDITOR default.tfvars
```
For authentication you can use the credentials file `~/.cloudstack.ini` or `./cloudstack.ini`.
The file should look something like this:
```ini
[cloudstack]
key = <API key>
secret = <API secret>
```
Follow the [Exoscale IAM Quick-start](https://community.exoscale.com/documentation/iam/quick-start/) to learn how to generate API keys.
### Encrypted credentials
To have the credentials encrypted at rest, you can use [sops](https://github.com/mozilla/sops) and only decrypt the credentials at runtime.
```bash
cat << EOF > cloudstack.ini
[cloudstack]
key =
secret =
EOF
sops --encrypt --in-place --pgp <PGP key fingerprint> cloudstack.ini
sops cloudstack.ini
```
Run terraform to create the infrastructure
```bash
terraform init ../../contrib/terraform/exoscale
terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale
```
If your cloudstack credentials file is encrypted using sops, run the following:
```bash
terraform init ../../contrib/terraform/exoscale
sops exec-file -no-fifo cloudstack.ini 'CLOUDSTACK_CONFIG={} terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale'
```
You should now have an inventory file named `inventory.ini` that you can use with kubespray.
You can now copy your inventory file and use it with kubespray to set up a cluster.
You can type `terraform output` to find out the IP addresses of the nodes, as well as the addresses of the control-plane and data-plane load-balancers.
It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by:
```bash
ansible -i inventory.ini -m ping all
```
Example to use this with the default sample inventory:
```bash
ansible-playbook -i inventory.ini ../../cluster.yml -b -v
```
## Teardown
The Kubernetes cluster cannot create any load-balancers or disks, so teardown is as simple as running Terraform destroy:
```bash
terraform destroy -var-file default.tfvars ../../contrib/terraform/exoscale
```
## Variables
### Required
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `zone`: The zone where to run the cluster
* `machines`: Machines to provision. Key of this object will be used as the name of the machine
* `node_type`: The role of this node *(master|worker)*
* `size`: The size to use
* `boot_disk`: The boot disk to use
* `image_name`: Name of the image
* `root_partition_size`: Size *(in GB)* for the root partition
* `ceph_partition_size`: Size *(in GB)* for the partition for rook to use as ceph storage. *(Set to 0 to disable)*
* `node_local_partition_size`: Size *(in GB)* for the partition for node-local-storage. *(Set to 0 to disable)*
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the Kubernetes nodes on ports 30000-32767 (Kubernetes NodePorts)
### Optional
* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)*
An example variables file can be found in `default.tfvars`
## Known limitations
### Only single disk
Since Exoscale doesn't support mounting additional disks onto an instance, this script can instead create partitions for [Rook](https://rook.io/) and [node-local-storage](https://kubernetes.io/docs/concepts/storage/volumes/#local).
### No Kubernetes API
The current solution doesn't use the [Exoscale Kubernetes cloud controller](https://github.com/exoscale/exoscale-cloud-controller-manager).
This means that we need to set up an HTTP(S) load-balancer in front of all workers and run the Ingress controller in DaemonSet mode, as sketched below.
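A sketch of the matching Kubespray settings, assuming the upstream ingress-nginx addon variables and the sample group_vars layout (both the path and the variable names here are assumptions, not part of this Terraform setup):

```bash
# Run ingress-nginx as a host-network DaemonSet so the HTTP(S) load-balancer
# in front of the workers can reach every node directly.
cat >> inventory/$CLUSTER/group_vars/k8s_cluster/addons.yml <<'EOF'
ingress_nginx_enabled: true
ingress_nginx_host_network: true
EOF
```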


@ -1,65 +0,0 @@
prefix = "default"
zone = "ch-gva-2"
inventory_file = "inventory.ini"
ssh_public_keys = [
# Put your public SSH key here
"ssh-rsa I-did-not-read-the-docs",
"ssh-rsa I-did-not-read-the-docs 2",
]
machines = {
"master-0" : {
"node_type" : "master",
"size" : "Medium",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
},
"worker-0" : {
"node_type" : "worker",
"size" : "Large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
},
"worker-1" : {
"node_type" : "worker",
"size" : "Large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
},
"worker-2" : {
"node_type" : "worker",
"size" : "Large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
}
}
nodeport_whitelist = [
"0.0.0.0/0"
]
ssh_whitelist = [
"0.0.0.0/0"
]
api_server_whitelist = [
"0.0.0.0/0"
]

Some files were not shown because too many files have changed in this diff.