Compare commits: master ... release-2. (18 commits)
| Author | SHA1 | Date |
| --- | --- | --- |
|  | cd832eadea |  |
|  | d28a6d68f9 |  |
|  | f4b6bce69f |  |
|  | 8e53d81146 |  |
|  | 28ee071bd6 |  |
|  | 0ba1c8ff0a |  |
|  | d6f859619c |  |
|  | 8a3bd6ba72 |  |
|  | d3fca7d47a |  |
|  | 587f7b33a7 |  |
|  | d3e8fd808e |  |
|  | 6bfc133d6c |  |
|  | 3d6b9d6c15 |  |
|  | 39fa9503d9 |  |
|  | 051e08e31c |  |
|  | dd539cf360 |  |
|  | 31094b1768 |  |
|  | 1f4ef0e86e |  |

1292 changed files with 15954 additions and 47950 deletions
.ansible-lint

@@ -2,8 +2,15 @@
parseable: true
skip_list:
  # see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules

  # DO NOT add any other rules to this skip_list, instead use local `# noqa` with a comment explaining WHY it is necessary
  # The following rules throw errors.
  # These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose.
  - '301'
  - '302'
  - '303'
  - '305'
  - '306'
  - '404'
  - '503'

  # These rules are intentionally skipped:
  #

@@ -18,13 +25,3 @@ skip_list:
  # While it can be useful to have these metadata available, they are also available in the existing documentation.
  # (Disabled in May 2019)
  - '701'

  # [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
  # Meta roles in Kubespray don't need proper names
  # (Disabled in June 2021)
  - 'role-name'

  # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
  # In Kubespray we use variables that use camelCase to match their k8s counterparts
  # (Disabled in June 2021)
  - 'var-naming'

.editorconfig

@@ -1,15 +0,0 @@
root = true

[*.{yaml,yml,yml.j2,yaml.j2}]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true
charset = utf-8

[{Dockerfile}]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true
charset = utf-8
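For reference, the numbered rules skipped above can also be passed on the command line when checking a tree locally; a minimal sketch (the `-x` skip flag is standard ansible-lint CLI, not taken from this diff):

```ShellSession
# Hypothetical local run mirroring the skip_list above
pip install ansible-lint
ansible-lint -v -x 301,302,303,305,306,404,503,role-name,var-naming .
```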
.github/ISSUE_TEMPLATE/support.md (2 changes, vendored)

@@ -1,7 +1,7 @@
---
name: Support Request
about: Support request or question relating to Kubespray
labels: kind/support
labels: triage/support

---
.gitignore (18 changes, vendored)

@@ -1,12 +1,8 @@
.vagrant
*.retry
**/vagrant_ansible_inventory
*.iml
temp
contrib/offline/offline-files
contrib/offline/offline-files.tar.gz
.idea
.vscode
.tox
.cache
*.bak

@@ -14,19 +10,15 @@ contrib/offline/offline-files.tar.gz
*.tfstate.backup
.terraform/
contrib/terraform/aws/credentials.tfvars
.terraform.lock.hcl
/ssh-bastion.conf
**/*.sw[pon]
*~
vagrant/
plugins/mitogen
deploy.sh

# Ansible inventory
inventory/*
!inventory/local
!inventory/sample
!inventory/c12s-sample
inventory/*/artifacts/

# Byte-compiled / optimized / DLL files

@@ -105,13 +97,3 @@ target/
# virtualenv
venv/
ENV/

# molecule
roles/**/molecule/**/__pycache__/

# macOS
.DS_Store

# Temp location used by our scripts
scripts/tmp/
tmp.md
.gitlab-ci.yml

@@ -8,7 +8,7 @@ stages:
  - deploy-special

variables:
  KUBESPRAY_VERSION: v2.20.0
  KUBESPRAY_VERSION: v2.12.9
  FAILFASTCI_NAMESPACE: 'kargo-ci'
  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
  ANSIBLE_FORCE_COLOR: "true"

@@ -16,7 +16,6 @@ variables:
  TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
  CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
  CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
  CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
  GS_ACCESS_KEY_ID: $GS_KEY
  GS_SECRET_ACCESS_KEY: $GS_SECRET
  CONTAINER_ENGINE: docker

@@ -27,20 +26,16 @@ variables:
  ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
  IDEMPOT_CHECK: "false"
  RESET_CHECK: "false"
  REMOVE_NODE_CHECK: "false"
  UPGRADE_TEST: "false"
  MITOGEN_ENABLE: "false"
  ANSIBLE_LOG_LEVEL: "-vv"
  RECOVER_CONTROL_PLANE_TEST: "false"
  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
  TERRAFORM_VERSION: 1.0.8
  ANSIBLE_MAJOR_VERSION: "2.11"
  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"

before_script:
  - ./tests/scripts/rebase.sh
  - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
  - python -m pip uninstall -y ansible ansible-base ansible-core
  - python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
  - python -m pip install -r tests/requirements.txt
  - mkdir -p /.ssh

.job: &job

@@ -48,13 +43,11 @@ before_script:
    - packet
  image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
  artifacts:
    when: always
    paths:
      - cluster-dump/

.testcases: &testcases
  <<: *job
  retry: 1
  before_script:
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
    - ./tests/scripts/rebase.sh

@@ -81,4 +74,3 @@ include:
  - .gitlab-ci/terraform.yml
  - .gitlab-ci/packet.yml
  - .gitlab-ci/vagrant.yml
  - .gitlab-ci/molecule.yml
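The before_script above can be approximated on a local checkout to reproduce the CI Python/Ansible setup; a sketch assuming a Debian-based environment (the expanded requirements file name follows `ANSIBLE_MAJOR_VERSION: "2.11"` from the hunk):

```ShellSession
# Mirror the master-side CI bootstrap by hand
update-alternatives --install /usr/bin/python python /usr/bin/python3 1
python -m pip uninstall -y ansible ansible-base ansible-core
python -m pip install -r tests/requirements-2.11.txt
```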
.gitlab-ci/lint.yml

@@ -14,7 +14,7 @@ vagrant-validate:
  stage: unit-tests
  tags: [light]
  variables:
    VAGRANT_VERSION: 2.2.19
    VAGRANT_VERSION: 2.2.4
  script:
    - ./tests/scripts/vagrant-validate.sh
  except: ['triggers', 'master']

@@ -23,8 +23,9 @@ ansible-lint:
  extends: .job
  stage: unit-tests
  tags: [light]
  script:
    - ansible-lint -v
  # lint every yml/yaml file that looks like it contains Ansible plays
  script: |-
    grep -Rl '^- hosts: \|^ hosts: ' --include \*.yml --include \*.yaml . | xargs -P 4 -n 25 ansible-lint -v
  except: ['triggers', 'master']

syntax-check:

@@ -52,7 +53,6 @@ tox-inventory-builder:
    - ./tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
  script:
    - pip3 install tox

@@ -64,23 +64,9 @@ markdownlint:
  tags: [light]
  image: node
  before_script:
    - npm install -g markdownlint-cli@0.22.0
    - npm install -g markdownlint-cli
  script:
    - markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md

check-readme-versions:
  stage: unit-tests
  tags: [light]
  image: python:3
  script:
    - tests/scripts/check_readme_versions.sh

check-typo:
  stage: unit-tests
  tags: [light]
  image: python:3
  script:
    - tests/scripts/check_typo.sh
    - markdownlint README.md docs --ignore docs/_sidebar.md

ci-matrix:
  stage: unit-tests
.gitlab-ci/molecule.yml

@@ -1,86 +0,0 @@
---

.molecule:
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  services: []
  stage: deploy-part1
  before_script:
    - tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/molecule_run.sh
  after_script:
    - chronic ./tests/scripts/molecule_logs.sh
  artifacts:
    when: always
    paths:
      - molecule_logs/

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.molecule_periodic:
  only:
    variables:
      - $PERIODIC_CI_ENABLED
  allow_failure: true
  extends: .molecule

molecule_full:
  extends: .molecule_periodic

molecule_no_container_engines:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -e container-engine
  when: on_success

molecule_docker:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
  when: on_success

molecule_containerd:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/containerd
  when: on_success

molecule_cri-o:
  extends: .molecule
  stage: deploy-part2
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/cri-o
  when: on_success

# Stage 3 container engines don't get as much attention so allow them to fail
molecule_kata:
  extends: .molecule
  stage: deploy-part3
  allow_failure: true
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
  when: on_success

molecule_gvisor:
  extends: .molecule
  stage: deploy-part3
  allow_failure: true
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/gvisor
  when: on_success

molecule_youki:
  extends: .molecule
  stage: deploy-part3
  allow_failure: true
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/youki
  when: on_success
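Each job above only varies the scenario passed to `molecule_run.sh`; outside CI the same entry points can be called directly, assuming the scripts from this repository:

```ShellSession
# Run one container-engine scenario the way molecule_containerd does
./tests/scripts/vagrant_clean.sh
./tests/scripts/molecule_run.sh -i container-engine/containerd
```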
.gitlab-ci/packet.yml

@@ -1,139 +1,78 @@
---
.packet:
.packet: &packet
  extends: .testcases
  variables:
    ANSIBLE_TIMEOUT: "120"
    CI_PLATFORM: packet
    SSH_USER: kubespray
    CI_PLATFORM: "packet"
    SSH_USER: "kubespray"
  tags:
    - packet
  except: [triggers]

# CI template for PRs
.packet_pr:
  only: [/^pr-.*$/]
  extends: .packet
  except: ['triggers']

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.packet_periodic:
  only:
    variables:
      - $PERIODIC_CI_ENABLED
  allow_failure: true

packet_ubuntu18-calico-aio:
  stage: deploy-part1
  extends: .packet
  when: on_success

# The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
# Future AIO job
packet_ubuntu20-calico-aio:
  stage: deploy-part1
  extends: .packet_pr
  when: on_success
  variables:
    RESET_CHECK: "true"

packet_ubuntu20-calico-aio-ansible-2_11:
  stage: deploy-part1
  extends: .packet_periodic
  when: on_success
  variables:
    ANSIBLE_MAJOR_VERSION: "2.11"
    RESET_CHECK: "true"
  extends: .packet
  when: manual

# ### PR JOBS PART2

packet_ubuntu18-aio-docker:
packet_centos7-flannel-containerd-addons-ha:
  extends: .packet
  stage: deploy-part2
  extends: .packet_pr
  when: on_success
  variables:
    MITOGEN_ENABLE: "true"

packet_ubuntu20-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu20-calico-aio-hardening:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu18-calico-aio:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu22-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu22-calico-aio:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_centos7-flannel-addons-ha:
  extends: .packet_pr
  stage: deploy-part2
  when: on_success

packet_almalinux8-crio:
  extends: .packet_pr
packet_centos7-crio:
  extends: .packet
  stage: deploy-part2
  when: on_success
  variables:
    MITOGEN_ENABLE: "true"

packet_ubuntu18-crio:
  extends: .packet_pr
  extends: .packet
  stage: deploy-part2
  when: manual
  variables:
    MITOGEN_ENABLE: "true"

packet_fedora35-crio:
  extends: .packet_pr
packet_ubuntu16-canal-kubeadm-ha:
  stage: deploy-part2
  when: manual

packet_ubuntu16-canal-ha:
  stage: deploy-part2
  extends: .packet_periodic
  extends: .packet
  when: on_success

packet_ubuntu16-canal-sep:
  stage: deploy-special
  extends: .packet_pr
  extends: .packet
  when: manual

packet_ubuntu16-flannel-ha:
  stage: deploy-part2
  extends: .packet_pr
  extends: .packet
  when: manual

packet_debian10-cilium-svc-proxy:
packet_ubuntu16-kube-router-sep:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success
  extends: .packet
  when: manual

packet_debian10-calico:
packet_debian10-containerd:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_debian10-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_debian11-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_debian11-docker:
  stage: deploy-part2
  extends: .packet_pr
  extends: .packet
  when: on_success
  variables:
    MITOGEN_ENABLE: "true"

packet_centos7-calico-ha-once-localhost:
  stage: deploy-part2
  extends: .packet_pr
  extends: .packet
  when: on_success
  variables:
    # This will instruct Docker not to start over TLS.

@@ -141,188 +80,130 @@ packet_centos7-calico-ha-once-localhost:
  services:
    - docker:19.03.9-dind

packet_almalinux8-kube-ovn:
packet_centos8-kube-ovn:
  stage: deploy-part2
  extends: .packet_periodic
  extends: .packet
  when: manual

packet_centos8-calico:
  stage: deploy-part2
  extends: .packet
  when: on_success

packet_almalinux8-calico:
packet_fedora30-weave:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_rockylinux8-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_rockylinux9-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_almalinux8-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_fedora36-docker-weave:
  stage: deploy-part2
  extends: .packet_pr
  extends: .packet
  when: on_success

packet_opensuse-canal:
  stage: deploy-part2
  extends: .packet_periodic
  extends: .packet
  when: on_success

packet_opensuse-docker-cilium:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
# Contiv does not work in k8s v1.16
# packet_ubuntu16-contiv-sep:
#   stage: deploy-part2
#   extends: .packet
#   when: on_success

# ### MANUAL JOBS

packet_ubuntu16-docker-weave-sep:
packet_ubuntu16-weave-sep:
  stage: deploy-part2
  extends: .packet_pr
  extends: .packet
  when: manual

packet_ubuntu18-cilium-sep:
  stage: deploy-special
  extends: .packet_pr
  extends: .packet
  when: manual

packet_ubuntu18-flannel-ha:
packet_ubuntu18-flannel-containerd-ha:
  stage: deploy-part2
  extends: .packet_pr
  extends: .packet
  when: manual

packet_ubuntu18-flannel-ha-once:
packet_ubuntu18-flannel-containerd-ha-once:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

# Calico HA eBPF
packet_almalinux8-calico-ha-ebpf:
  stage: deploy-part2
  extends: .packet_pr
  extends: .packet
  when: manual

packet_debian9-macvlan:
  stage: deploy-part2
  extends: .packet_pr
  extends: .packet
  when: manual

packet_centos7-calico-ha:
  stage: deploy-part2
  extends: .packet_pr
  extends: .packet
  when: manual

packet_centos7-kube-router:
  stage: deploy-part2
  extends: .packet
  when: manual

packet_centos7-multus-calico:
  stage: deploy-part2
  extends: .packet_pr
  extends: .packet
  when: manual

packet_centos7-canal-ha:
packet_oracle7-canal-ha:
  stage: deploy-part2
  extends: .packet_pr
  extends: .packet
  when: manual

packet_fedora36-docker-calico:
packet_fedora31-flannel:
  stage: deploy-part2
  extends: .packet_periodic
  extends: .packet
  when: on_success
  variables:
    RESET_CHECK: "true"

packet_fedora35-calico-selinux:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_fedora35-calico-swap-selinux:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
    MITOGEN_ENABLE: "true"

packet_amazon-linux-2-aio:
  stage: deploy-part2
  extends: .packet_pr
  extends: .packet
  when: manual

packet_almalinux8-calico-nodelocaldns-secondary:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_fedora36-kube-ovn:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

# ### PR JOBS PART3
# Long jobs (45min+)

packet_centos7-weave-upgrade-ha:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    UPGRADE_TEST: basic

packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    UPGRADE_TEST: basic

# Calico HA Wireguard
packet_ubuntu20-calico-ha-wireguard:
  stage: deploy-part2
  extends: .packet_pr
  extends: .packet
  when: manual
  variables:
    UPGRADE_TEST: basic
    MITOGEN_ENABLE: "false"

packet_debian11-calico-upgrade:
packet_debian9-calico-upgrade:
  stage: deploy-part3
  extends: .packet_pr
  when: on_success
  extends: .packet
  when: manual
  variables:
    UPGRADE_TEST: graceful
    MITOGEN_ENABLE: "false"

packet_almalinux8-calico-remove-node:
packet_debian9-calico-upgrade-once:
  stage: deploy-part3
  extends: .packet_pr
  when: on_success
  variables:
    REMOVE_NODE_CHECK: "true"
    REMOVE_NODE_NAME: "instance-3"

packet_ubuntu20-calico-etcd-kubeadm:
  stage: deploy-part3
  extends: .packet_pr
  when: on_success

packet_debian11-calico-upgrade-once:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  extends: .packet
  when: manual
  variables:
    UPGRADE_TEST: graceful
    MITOGEN_ENABLE: "false"

packet_ubuntu18-calico-ha-recover:
  stage: deploy-part3
  extends: .packet_periodic
  extends: .packet
  when: on_success
  variables:
    RECOVER_CONTROL_PLANE_TEST: "true"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"

packet_ubuntu18-calico-ha-recover-noquorum:
  stage: deploy-part3
  extends: .packet_periodic
  extends: .packet
  when: on_success
  variables:
    RECOVER_CONTROL_PLANE_TEST: "true"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube-master[1:]"
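A recurring change in this file is the inventory group rename between `kube-master` (release branch) and `kube_control_plane` (master). A quick way to see which convention a given checkout uses, assuming the standard sample inventory layout:

```ShellSession
# List sample inventory files mentioning either group-name convention
grep -rEl 'kube[-_]master|kube_control_plane' inventory/sample | head
```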
.gitlab-ci/shellcheck.yml

@@ -4,13 +4,13 @@ shellcheck:
  stage: unit-tests
  tags: [light]
  variables:
    SHELLCHECK_VERSION: v0.7.1
    SHELLCHECK_VERSION: v0.6.0
  before_script:
    - ./tests/scripts/rebase.sh
    - curl --silent --location "https://github.com/koalaman/shellcheck/releases/download/"${SHELLCHECK_VERSION}"/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv
    - cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
    - shellcheck --version
  script:
    # Run shellcheck for all *.sh
    - find . -name '*.sh' -not -path './.git/*' | xargs shellcheck --severity error
    # Run shellcheck for all *.sh except contrib/
    - find . -name '*.sh' -not -path './contrib/*' -not -path './.git/*' | xargs shellcheck --severity error
  except: ['triggers', 'master']
.gitlab-ci/terraform.yml

@@ -12,15 +12,12 @@
    # Prepare inventory
    - cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars .
    - ln -s contrib/terraform/$PROVIDER/hosts
    - terraform -chdir="contrib/terraform/$PROVIDER" init
    - terraform init contrib/terraform/$PROVIDER
    # Copy SSH keypair
    - mkdir -p ~/.ssh
    - echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
    - chmod 400 ~/.ssh/id_rsa
    - echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
    - mkdir -p contrib/terraform/$PROVIDER/group_vars
    # Random subnet to avoid routing conflicts
    - export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"

.terraform_validate:
  extends: .terraform_install

@@ -28,8 +25,8 @@
  tags: [light]
  only: ['master', /^pr-.*$/]
  script:
    - terraform -chdir="contrib/terraform/$PROVIDER" validate
    - terraform -chdir="contrib/terraform/$PROVIDER" fmt -check -diff
    - terraform validate -var-file=cluster.tfvars contrib/terraform/$PROVIDER
    - terraform fmt -check -diff contrib/terraform/$PROVIDER

.terraform_apply:
  extends: .terraform_install

@@ -38,7 +35,6 @@
  when: manual
  only: [/^pr-.*$/]
  artifacts:
    when: always
    paths:
      - cluster-dump/
  variables:

@@ -56,48 +52,28 @@
tf-validate-openstack:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    TF_VERSION: 0.12.24
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-metal:
tf-validate-packet:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: metal
    TF_VERSION: 0.12.24
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-aws:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    TF_VERSION: 0.12.24
    PROVIDER: aws
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-exoscale:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: exoscale

tf-validate-vsphere:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: vsphere
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-upcloud:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: upcloud
    CLUSTER: $CI_COMMIT_REF_NAME

# tf-packet-ubuntu16-default:
#   extends: .terraform_apply
#   variables:
#     TF_VERSION: $TERRAFORM_VERSION
#     TF_VERSION: 0.12.24
#     PROVIDER: packet
#     CLUSTER: $CI_COMMIT_REF_NAME
#     TF_VAR_number_of_k8s_masters: "1"

@@ -111,7 +87,7 @@ tf-validate-upcloud:
# tf-packet-ubuntu18-default:
#   extends: .terraform_apply
#   variables:
#     TF_VERSION: $TERRAFORM_VERSION
#     TF_VERSION: 0.12.24
#     PROVIDER: packet
#     CLUSTER: $CI_COMMIT_REF_NAME
#     TF_VAR_number_of_k8s_masters: "1"

@@ -122,114 +98,80 @@ tf-validate-upcloud:
#     TF_VAR_public_key_path: ""
#     TF_VAR_operating_system: ubuntu_18_04

.ovh_variables: &ovh_variables
  OS_AUTH_URL: https://auth.cloud.ovh.net/v3
  OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe
  OS_PROJECT_NAME: "9361447987648822"
  OS_USER_DOMAIN_NAME: Default
  OS_PROJECT_DOMAIN_ID: default
  OS_USERNAME: 8XuhBMfkKVrk
  OS_REGION_NAME: UK1
  OS_INTERFACE: public
  OS_IDENTITY_API_VERSION: "3"

# Elastx is generously donating resources for Kubespray on Openstack CI
# Contacts: @gix @bl0m1
.elastx_variables: &elastx_variables
  OS_AUTH_URL: https://ops.elastx.cloud:5000
  OS_PROJECT_ID: 564c6b461c6b44b1bb19cdb9c2d928e4
  OS_PROJECT_NAME: kubespray_ci
  OS_USER_DOMAIN_NAME: Default
  OS_PROJECT_DOMAIN_ID: default
  OS_USERNAME: kubespray@root314.com
  OS_REGION_NAME: se-sto
  OS_INTERFACE: public
  OS_IDENTITY_API_VERSION: "3"
  TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"

tf-elastx_cleanup:
  stage: unit-tests
  tags: [light]
  image: python
  variables:
    <<: *elastx_variables
  before_script:
    - pip install -r scripts/openstack-cleanup/requirements.txt
  script:
    - ./scripts/openstack-cleanup/main.py

tf-elastx_ubuntu18-calico:
  extends: .terraform_apply
  stage: deploy-part3
  when: on_success
  allow_failure: true
  variables:
    <<: *elastx_variables
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"
    SSH_USER: ubuntu
    TF_VAR_number_of_k8s_masters: "1"
    TF_VAR_number_of_k8s_masters_no_floating_ip: "0"
    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
    TF_VAR_number_of_etcd: "0"
    TF_VAR_number_of_k8s_nodes: "1"
    TF_VAR_number_of_k8s_nodes_no_floating_ip: "0"
    TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
    TF_VAR_number_of_bastions: "0"
    TF_VAR_number_of_k8s_masters_no_etcd: "0"
    TF_VAR_floatingip_pool: "elx-public1"
    TF_VAR_dns_nameservers: '["1.1.1.1", "8.8.8.8", "8.8.4.4"]'
    TF_VAR_use_access_ip: "0"
    TF_VAR_external_net: "600b8501-78cb-4155-9c9f-23dfcba88828"
    TF_VAR_network_name: "ci-$CI_JOB_ID"
    TF_VAR_az_list: '["sto1"]'
    TF_VAR_az_list_node: '["sto1"]'
    TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
    TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
    TF_VAR_image: ubuntu-18.04-server-latest
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

# OVH voucher expired, commenting job until things are sorted out
# .ovh_variables: &ovh_variables
#   OS_AUTH_URL: https://auth.cloud.ovh.net/v3
#   OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe
#   OS_PROJECT_NAME: "9361447987648822"
#   OS_USER_DOMAIN_NAME: Default
#   OS_PROJECT_DOMAIN_ID: default
#   OS_USERNAME: 8XuhBMfkKVrk
#   OS_REGION_NAME: UK1
#   OS_INTERFACE: public
#   OS_IDENTITY_API_VERSION: "3"

# tf-ovh_cleanup:
#   stage: unit-tests
#   tags: [light]
#   image: python
#   environment: ovh
#   variables:
#     <<: *ovh_variables
#   before_script:
#     - pip install -r scripts/openstack-cleanup/requirements.txt
#   script:
#     - ./scripts/openstack-cleanup/main.py
#   stage: unit-tests
#   tags: [light]
#   image: python
#   variables:
#     <<: *ovh_variables
#   before_script:
#     - pip install -r scripts/openstack-cleanup/requirements.txt
#   script:
#     - ./scripts/openstack-cleanup/main.py

# tf-ovh_ubuntu18-calico:
#   extends: .terraform_apply
#   when: on_success
#   environment: ovh
#   variables:
#     <<: *ovh_variables
#     TF_VERSION: $TERRAFORM_VERSION
#     PROVIDER: openstack
#     CLUSTER: $CI_COMMIT_REF_NAME
#     ANSIBLE_TIMEOUT: "60"
#     SSH_USER: ubuntu
#     TF_VAR_number_of_k8s_masters: "0"
#     TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
#     TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
#     TF_VAR_number_of_etcd: "0"
#     TF_VAR_number_of_k8s_nodes: "0"
#     TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
#     TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
#     TF_VAR_number_of_bastions: "0"
#     TF_VAR_number_of_k8s_masters_no_etcd: "0"
#     TF_VAR_use_neutron: "0"
#     TF_VAR_floatingip_pool: "Ext-Net"
#     TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
#     TF_VAR_network_name: "Ext-Net"
#     TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
#     TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
#     TF_VAR_image: "Ubuntu 18.04"
#     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
#   extends: .terraform_apply
#   when: on_success
#   variables:
#     <<: *ovh_variables
#     TF_VERSION: 0.12.24
#     PROVIDER: openstack
#     CLUSTER: $CI_COMMIT_REF_NAME
#     ANSIBLE_TIMEOUT: "60"
#     SSH_USER: ubuntu
#     TF_VAR_number_of_k8s_masters: "0"
#     TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
#     TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
#     TF_VAR_number_of_etcd: "0"
#     TF_VAR_number_of_k8s_nodes: "0"
#     TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
#     TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
#     TF_VAR_number_of_bastions: "0"
#     TF_VAR_number_of_k8s_masters_no_etcd: "0"
#     TF_VAR_use_neutron: "0"
#     TF_VAR_floatingip_pool: "Ext-Net"
#     TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
#     TF_VAR_network_name: "Ext-Net"
#     TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
#     TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
#     TF_VAR_image: "Ubuntu 18.04"
#     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

# tf-ovh_coreos-calico:
#   extends: .terraform_apply
#   when: on_success
#   variables:
#     <<: *ovh_variables
#     TF_VERSION: 0.12.24
#     PROVIDER: openstack
#     CLUSTER: $CI_COMMIT_REF_NAME
#     ANSIBLE_TIMEOUT: "60"
#     SSH_USER: core
#     TF_VAR_number_of_k8s_masters: "0"
#     TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
#     TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
#     TF_VAR_number_of_etcd: "0"
#     TF_VAR_number_of_k8s_nodes: "0"
#     TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
#     TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
#     TF_VAR_number_of_bastions: "0"
#     TF_VAR_number_of_k8s_masters_no_etcd: "0"
#     TF_VAR_use_neutron: "0"
#     TF_VAR_floatingip_pool: "Ext-Net"
#     TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
#     TF_VAR_network_name: "Ext-Net"
#     TF_VAR_flavor_k8s_master: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
#     TF_VAR_flavor_k8s_node: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
#     TF_VAR_image: "CoreOS Stable"
#     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
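The validate jobs differ mainly in Terraform CLI syntax; side by side, with `openstack` standing in for `$PROVIDER` (both invocation styles are taken from the hunks above):

```ShellSession
# Master side: Terraform with -chdir, pinned via TERRAFORM_VERSION: 1.0.8
terraform -chdir="contrib/terraform/openstack" init
terraform -chdir="contrib/terraform/openstack" validate

# Release side: legacy positional syntax for TF_VERSION: 0.12.24
terraform init contrib/terraform/openstack
terraform validate -var-file=cluster.tfvars contrib/terraform/openstack
```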
.gitlab-ci/vagrant.yml

@@ -1,10 +1,26 @@
---

molecule_tests:
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  services: []
  stage: deploy-part1
  before_script:
    - tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/molecule_run.sh

.vagrant:
  extends: .testcases
  variables:
    CI_PLATFORM: "vagrant"
    SSH_USER: "vagrant"
    SSH_USER: "kubespray"
    VAGRANT_DEFAULT_PROVIDER: "libvirt"
    KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
  tags: [c3.small.x86]

@@ -15,19 +31,12 @@
  before_script:
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/testcases_run.sh
    - vagrant up
  after_script:
    - chronic ./tests/scripts/testcases_cleanup.sh
  allow_failure: true

vagrant_ubuntu18-calico-dual-stack:
  stage: deploy-part2
  extends: .vagrant
  when: on_success
    - vagrant destroy --force

vagrant_ubuntu18-flannel:
  stage: deploy-part2

@@ -37,31 +46,4 @@ vagrant_ubuntu18-flannel:
vagrant_ubuntu18-weave-medium:
  stage: deploy-part2
  extends: .vagrant
  when: manual

vagrant_ubuntu20-flannel:
  stage: deploy-part2
  extends: .vagrant
  when: on_success
  allow_failure: false

vagrant_ubuntu16-kube-router-sep:
  stage: deploy-part2
  extends: .vagrant
  when: manual

# Service proxy test fails connectivity testing
vagrant_ubuntu16-kube-router-svc-proxy:
  stage: deploy-part2
  extends: .vagrant
  when: manual

vagrant_fedora35-kube-router:
  stage: deploy-part2
  extends: .vagrant
  when: on_success

vagrant_centos7-kube-router:
  stage: deploy-part2
  extends: .vagrant
  when: manual
  when: manual
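On master, the `.vagrant` jobs are driven through helper scripts keyed off the CI job name rather than a bare `vagrant up`; a sketch of reproducing one job locally (the exported config path follows the `KUBESPRAY_VAGRANT_CONFIG` pattern above, with one of the listed job names filled in):

```ShellSession
export VAGRANT_DEFAULT_PROVIDER=libvirt
export KUBESPRAY_VAGRANT_CONFIG=tests/files/vagrant_ubuntu20-flannel.rb
./tests/scripts/testcases_run.sh
```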
.markdownlint.yaml

@@ -1,3 +1,2 @@
---
MD013: false
MD029: false
.pre-commit-config.yaml

@@ -1,48 +0,0 @@
---
repos:
  - repo: https://github.com/adrienverge/yamllint.git
    rev: v1.27.1
    hooks:
      - id: yamllint
        args: [--strict]

  - repo: https://github.com/markdownlint/markdownlint
    rev: v0.11.0
    hooks:
      - id: markdownlint
        args: [ -r, "~MD013,~MD029" ]
        exclude: "^.git"

  - repo: local
    hooks:
      - id: ansible-lint
        name: ansible-lint
        entry: ansible-lint -v
        language: python
        pass_filenames: false
        additional_dependencies:
          - .[community]

      - id: ansible-syntax-check
        name: ansible-syntax-check
        entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
        language: python
        files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"

      - id: tox-inventory-builder
        name: tox-inventory-builder
        entry: bash -c "cd contrib/inventory_builder && tox"
        language: python
        pass_filenames: false

      - id: check-readme-versions
        name: check-readme-versions
        entry: tests/scripts/check_readme_versions.sh
        language: script
        pass_filenames: false

      - id: ci-matrix
        name: ci-matrix
        entry: tests/scripts/md-table/test.sh
        language: script
        pass_filenames: false
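With the configuration above in place (master only), individual hooks can be exercised by id; `pre-commit run <hook-id>` is the tool's standard interface:

```ShellSession
pip install pre-commit
pre-commit run yamllint --all-files
pre-commit run ansible-syntax-check --all-files
```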
.yamllint

@@ -1,9 +1,6 @@
---
extends: default

ignore: |
  .git/

rules:
  braces:
    min-spaces-inside: 0
CONTRIBUTING.md

@@ -6,22 +6,11 @@
It is recommended to use filter to manage the GitHub email notification, see [examples for setting filters to Kubernetes Github notifications](https://github.com/kubernetes/community/blob/master/communication/best-practices.md#examples-for-setting-filters-to-kubernetes-github-notifications)

To install development dependencies you can set up a python virtual env with the necessary dependencies:

```ShellSession
virtualenv venv
source venv/bin/activate
pip install -r tests/requirements.txt
```
To install development dependencies you can use `pip install -r tests/requirements.txt`

#### Linting

Kubespray uses [pre-commit](https://pre-commit.com) hook configuration to run several linters, please install this tool and use it to run validation tests before submitting a PR.

```ShellSession
pre-commit install
pre-commit run -a # To run pre-commit hook on all files in the repository, even if they were not modified
```
Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `./tests/scripts/ansible-lint.sh`

#### Molecule

@@ -38,9 +27,5 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster
1. Submit an issue describing your proposed change to the repo in question.
2. The [repo owners](OWNERS) will respond to your issue promptly.
3. Fork the desired repo, develop and test your code changes.
4. Install [pre-commit](https://pre-commit.com) and set it up in your development repo.
5. Address any pre-commit validation failures.
6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
7. Submit a pull request.
8. Work with the reviewers on their suggestions.
9. Ensure to rebase to the HEAD of your target branch and squash unnecessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before the final merge of your contribution.
4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
5. Submit a pull request.
Dockerfile (48 changes)

@@ -1,37 +1,21 @@
# Use immutable image tags rather than mutable tags (like ubuntu:20.04)
FROM ubuntu:focal-20220531
FROM ubuntu:18.04

ARG ARCH=amd64
ARG TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN apt update -y \
    && apt install -y \
RUN mkdir /kubespray
WORKDIR /kubespray
RUN apt update -y && \
    apt install -y \
    libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
    ca-certificates curl gnupg2 software-properties-common python3-pip unzip rsync git \
    && rm -rf /var/lib/apt/lists/*
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
    && add-apt-repository \
    "deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) \
    stable" \
    && apt update -y && apt-get install --no-install-recommends -y docker-ce \
    && rm -rf /var/lib/apt/lists/*
    ca-certificates curl gnupg2 software-properties-common python3-pip rsync
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
    add-apt-repository \
    "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) \
    stable" \
    && apt update -y && apt-get install docker-ce -y
COPY . .
RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.5/bin/linux/amd64/kubectl \
    && chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl

# Some tools like yamllint need this
# Pip needs this as well at the moment to install ansible
# (and potentially other packages)
# See: https://github.com/pypa/pip/issues/10219
ENV LANG=C.UTF-8

WORKDIR /kubespray
COPY . .
RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \
    && /usr/bin/python3 -m pip install --no-cache-dir -r tests/requirements.txt \
    && python3 -m pip install --no-cache-dir -r requirements.txt \
    && update-alternatives --install /usr/bin/python python /usr/bin/python3 1

RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
    && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
    && chmod a+x kubectl \
    && mv kubectl /usr/local/bin/kubectl
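Either branch's Dockerfile builds a self-contained deployment image; a minimal sketch of building and entering it (the tag name is illustrative):

```ShellSession
docker build -t kubespray-local .
docker run --rm -it kubespray-local bash
```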
Makefile (4 changes)

@@ -1,7 +1,5 @@
mitogen:
	@echo Mitogen support is deprecated.
	@echo Please run the following command manually:
	@echo ansible-playbook -c local mitogen.yml -vv
	ansible-playbook -c local mitogen.yml -vv
clean:
	rm -rf dist/
	rm *.retry
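Both targets are invoked the usual way; on master, `make mitogen` now only prints the deprecation notice instead of running the playbook:

```ShellSession
make mitogen
make clean
```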
OWNERS (2 changes)

@@ -4,5 +4,3 @@ approvers:
  - kubespray-approvers
reviewers:
  - kubespray-reviewers
emeritus_approvers:
  - kubespray-emeritus_approvers
OWNERS_ALIASES

@@ -1,26 +1,19 @@
aliases:
  kubespray-approvers:
    - ant31
    - mattymo
    - atoms
    - chadswen
    - mirwan
    - miouge1
    - riverzhang
    - verwilst
    - woopstar
    - luckysb
    - floryut
    - oomichi
    - cristicalin
    - liupeng0518
    - yankay
  kubespray-reviewers:
    - jjungnickel
    - archifleks
    - holmsten
    - bozzo
    - floryut
    - eppo
    - oomichi
    - jayonlau
    - cristicalin
    - liupeng0518
    - yankay
  kubespray-emeritus_approvers:
    - riverzhang
    - atoms
    - ant31
    - woopstar
134
README.md
134
README.md
|
@ -5,7 +5,7 @@
|
|||
If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
|
||||
You can get your invite [here](http://slack.k8s.io/)
|
||||
|
||||
- Can be deployed on **[AWS](docs/aws.md), GCE, [Azure](docs/azure.md), [OpenStack](docs/openstack.md), [vSphere](docs/vsphere.md), [Equinix Metal](docs/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
|
||||
- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Packet (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
|
||||
- **Highly available** cluster
|
||||
- **Composable** (Choice of the network plugin for instance)
|
||||
- Supports most popular **Linux distributions**
|
||||
|
@ -19,10 +19,10 @@ To deploy the cluster you can use :
|
|||
|
||||
#### Usage
|
||||
|
||||
Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
|
||||
then run the following steps:
|
||||
|
||||
```ShellSession
|
||||
# Install dependencies from ``requirements.txt``
|
||||
sudo pip3 install -r requirements.txt
|
||||
|
||||
# Copy ``inventory/sample`` as ``inventory/mycluster``
|
||||
cp -rfp inventory/sample inventory/mycluster
|
||||
|
||||
|
@ -32,7 +32,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv
|
|||
|
||||
# Review and change parameters under ``inventory/mycluster/group_vars``
|
||||
cat inventory/mycluster/group_vars/all/all.yml
|
||||
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
|
||||
cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
|
||||
|
||||
# Deploy Kubespray with Ansible Playbook - run the playbook as root
|
||||
# The option `--become` is required, as for example writing SSL keys in /etc/,
|
||||
|
@ -48,24 +48,11 @@ As a consequence, `ansible-playbook` command will fail with:
|
|||
ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
|
||||
```
|
||||
|
||||
probably pointing on a task depending on a module present in requirements.txt.
|
||||
probably pointing on a task depending on a module present in requirements.txt (i.e. "unseal vault").
|
||||
|
||||
One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
|
||||
A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of pip packages installation location, which can be found in the Location field of the output of `pip show [package]` before executing `ansible-playbook`.
|
||||
|
||||
A simple way to ensure you get all the correct version of Ansible is to use the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
|
||||
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
|
||||
|
||||
```ShellSession
|
||||
git checkout v2.20.0
|
||||
docker pull quay.io/kubespray/kubespray:v2.20.0
|
||||
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
|
||||
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
|
||||
quay.io/kubespray/kubespray:v2.20.0 bash
|
||||
# Inside the container you may now run the kubespray playbooks:
|
||||
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
|
||||
```
|
||||
|
||||
### Vagrant
|
||||
|
||||
For Vagrant we need to install python dependencies for provisioning tasks.
|
||||
|
@ -76,11 +63,10 @@ python -V && pip -V
|
|||
```
|
||||
|
||||
If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>
|
||||
|
||||
Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
|
||||
then run the following step:
|
||||
Install the necessary requirements
|
||||
|
||||
```ShellSession
|
||||
sudo pip install -r requirements.txt
|
||||
vagrant up
|
||||
```
|
||||
|
||||
|
@ -89,7 +75,6 @@ vagrant up
|
|||
- [Requirements](#requirements)
|
||||
- [Kubespray vs ...](docs/comparisons.md)
|
||||
- [Getting started](docs/getting-started.md)
|
||||
- [Setting up your first cluster](docs/setting-up-your-first-cluster.md)
|
||||
- [Ansible inventory and tags](docs/ansible.md)
|
||||
- [Integration with existing ansible repo](docs/integration.md)
|
||||
- [Deployment data variables](docs/vars.md)
|
||||
|
@ -97,7 +82,7 @@ vagrant up
|
|||
- [HA mode](docs/ha-mode.md)
|
||||
- [Network plugins](#network-plugins)
|
||||
- [Vagrant install](docs/vagrant.md)
|
||||
- [Flatcar Container Linux bootstrap](docs/flatcar.md)
|
||||
- [CoreOS bootstrap](docs/coreos.md)
|
||||
- [Fedora CoreOS bootstrap](docs/fcos.md)
|
||||
- [Debian Jessie setup](docs/debian.md)
|
||||
- [openSUSE setup](docs/opensuse.md)
|
||||
|
@ -107,85 +92,59 @@ vagrant up
|
|||
- [AWS](docs/aws.md)
|
||||
- [Azure](docs/azure.md)
|
||||
- [vSphere](docs/vsphere.md)
|
||||
- [Equinix Metal](docs/equinix-metal.md)
|
||||
- [Packet Host](docs/packet.md)
|
||||
- [Large deployments](docs/large-deployments.md)
|
||||
- [Adding/replacing a node](docs/nodes.md)
|
||||
- [Upgrades basics](docs/upgrades.md)
|
||||
- [Air-Gap installation](docs/offline-environment.md)
|
||||
- [NTP](docs/ntp.md)
|
||||
- [Hardening](docs/hardening.md)
|
||||
- [Mirror](docs/mirror.md)
|
||||
- [Roadmap](docs/roadmap.md)
|
||||
|
||||
## Supported Linux Distributions
|
||||
|
||||
- **Flatcar Container Linux by Kinvolk**
|
||||
- **Debian** Bullseye, Buster, Jessie, Stretch
|
||||
- **Ubuntu** 16.04, 18.04, 20.04, 22.04
|
||||
- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
|
||||
- **Fedora** 35, 36
|
||||
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
|
||||
- **openSUSE** Leap 15.x/Tumbleweed
|
||||
- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
|
||||
- **Alma Linux** [8, 9](docs/centos.md#centos-8)
|
||||
- **Rocky Linux** [8, 9](docs/centos.md#centos-8)
|
||||
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
|
||||
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
|
||||
- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md))
|
||||
- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md))
|
||||
- **Container Linux by CoreOS**
|
||||
- **Debian** Buster, Jessie, Stretch, Wheezy
|
||||
- **Ubuntu** 16.04, 18.04
|
||||
- **CentOS/RHEL** 7, 8 (experimental: see [centos 8 notes](docs/centos8.md)
|
||||
- **Fedora** 30, 31
|
||||
- **Fedora CoreOS** (experimental: see [fcos Note](docs/fcos.md))
|
||||
- **openSUSE** Leap 42.3/Tumbleweed
|
||||
- **Oracle Linux** 7
|
||||
|
||||
Note: Upstart/SysV init based OS types are not supported.
|
||||
|
||||
## Supported Components
|
||||
|
||||
- Core
|
||||
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.5
|
||||
- [etcd](https://github.com/etcd-io/etcd) v3.5.6
|
||||
- [docker](https://www.docker.com/) v20.10 (see note)
|
||||
- [containerd](https://containerd.io/) v1.6.14
|
||||
- [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
||||
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.17.12
|
||||
- [etcd](https://github.com/coreos/etcd) v3.3.12
|
||||
- [docker](https://www.docker.com/) v18.06 (see note)
|
||||
- [containerd](https://containerd.io/) v1.2.13
|
||||
- [cri-o](http://cri-o.io/) v1.17 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
||||
- Network Plugin
|
||||
- [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
|
||||
- [calico](https://github.com/projectcalico/calico) v3.24.5
|
||||
- [cni-plugins](https://github.com/containernetworking/plugins) v0.8.6
|
||||
- [calico](https://github.com/projectcalico/calico) v3.13.2
|
||||
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
|
||||
- [cilium](https://github.com/cilium/cilium) v1.12.1
|
||||
- [flannel](https://github.com/flannel-io/flannel) v0.19.2
|
||||
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7
|
||||
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
|
||||
- [multus](https://github.com/intel/multus-cni) v3.8
|
||||
- [weave](https://github.com/weaveworks/weave) v2.8.1
|
||||
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.5
|
||||
- [cilium](https://github.com/cilium/cilium) v1.7.2
|
||||
- [contiv](https://github.com/contiv/install) v1.2.1
|
||||
- [flanneld](https://github.com/coreos/flannel) v0.12.0
|
||||
- [kube-router](https://github.com/cloudnativelabs/kube-router) v0.4.0
|
||||
- [multus](https://github.com/intel/multus-cni) v3.4.1
|
||||
- [weave](https://github.com/weaveworks/weave) v2.6.2
|
||||
- Application
|
||||
- [cert-manager](https://github.com/jetstack/cert-manager) v1.10.1
|
||||
- [coredns](https://github.com/coredns/coredns) v1.9.3
|
||||
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.5.1
|
||||
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
|
||||
- [argocd](https://argoproj.github.io/) v2.4.16
|
||||
- [helm](https://helm.sh/) v3.9.4
|
||||
- [metallb](https://metallb.universe.tf/) v0.12.1
|
||||
- [registry](https://github.com/distribution/distribution) v2.8.1
|
||||
- Storage Plugin
|
||||
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
|
||||
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
|
||||
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
|
||||
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
|
||||
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
|
||||
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
|
||||
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22
|
||||
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
|
||||
- [cert-manager](https://github.com/jetstack/cert-manager) v0.11.1
|
||||
- [coredns](https://github.com/coredns/coredns) v1.6.5
|
||||
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.30.0
|
||||
|
||||
## Container Runtime Notes
|
||||
|
||||
- The list of available docker version is 18.09, 19.03 and 20.10. The recommended docker version is 20.10. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
|
||||
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)
|
||||
Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md) was updated to 1.13.1, 17.03, 17.06, 17.09, 18.06, 18.09. kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
|
||||
|
||||
## Requirements
|
||||
|
||||
- **Minimum required version of Kubernetes is v1.23**
|
||||
- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
|
||||
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
|
||||
- **Minimum required version of Kubernetes is v1.15**
|
||||
- **Ansible v2.9+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
|
||||
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment))
|
||||
- The target servers are configured to allow **IPv4 forwarding**.
|
||||
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
|
||||
- **Your ssh key must be copied** to all the servers part of your inventory.
|
||||
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
|
||||
in order to avoid any issue during deployment you should disable your firewall.
|
||||
- If kubespray is ran from non-root user account, correct privilege escalation method
|
||||
|
@@ -215,6 +174,9 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)

- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.

- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
  apply firewall policies, segregate containers in multiple networks and bridge pods onto physical networks.

- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
  (Please refer to the `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).

@@ -233,12 +195,6 @@ The choice is defined with the variable `kube_network_plugin`. There is also an
option to leverage built-in cloud provider networking instead.
See also [Network checker](docs/netcheck.md).

## Ingress Plugins

- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.

- [metallb](docs/metallb.md): the MetalLB bare-metal service LoadBalancer provider.

## Community docs and resources

- [kubernetes.io/docs/setup/production-environment/tools/kubespray/](https://kubernetes.io/docs/setup/production-environment/tools/kubespray/)

@@ -250,12 +206,10 @@ See also [Network checker](docs/netcheck.md).

- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
- [Kubean](https://github.com/kubean-io/kubean)

## CI Tests

[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/pipeline.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)

CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).
[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/build.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)

CI/end-to-end tests sponsored by Google (GCE)
See the [test matrix](docs/test_cases.md) for details.
53 RELEASE.md
@@ -2,18 +2,17 @@

The Kubespray Project is released on an as-needed basis. The process is as follows:

1. An issue is proposing a new release with a changelog since the last release. Please see [a good sample issue](https://github.com/kubernetes-sigs/kubespray/issues/8325)
1. An issue is proposing a new release with a changelog since the last release
2. At least one of the [approvers](OWNERS_ALIASES) must approve this release
3. The `kube_version_min_required` variable is set to `n-1`
4. Remove hashes for [EOL versions](https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
5. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
6. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
7. An approver creates a release branch in the form `release-X.Y`
8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
9. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
10. The release issue is closed
11. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
12. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
4. Remove hashes for [EOL versions](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
5. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
6. An approver creates a release branch in the form `release-X.Y`
7. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
8. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
9. The release issue is closed
10. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
11. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`

## Major/minor releases and milestones

@@ -47,37 +46,3 @@ The Kubespray Project is released on an as-needed basis. The process is as follows:
then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1
and *any* changes to other components, like etcd v4, or calico 1.2.3.
And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively.

## Release note creation

You can create a release note with:

```shell
export GITHUB_TOKEN=<your-github-token>
export ORG=kubernetes-sigs
export REPO=kubespray
release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --dependencies=false --output=/tmp/kubespray-release-note --required-author=""
```

If the release note file (/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label (`kind/feature`, etc.).
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note.

## Container image creation

The container image `quay.io/kubespray/kubespray:vX.Y.Z` can be created from the Dockerfile in the kubespray root directory:

```shell
cd kubespray/
nerdctl build -t quay.io/kubespray/kubespray:vX.Y.Z .
nerdctl push quay.io/kubespray/kubespray:vX.Y.Z
```

The container image `quay.io/kubespray/vagrant:vX.Y.Z` can be created from build.sh of test-infra/vagrant-docker/:

```shell
cd kubespray/test-infra/vagrant-docker/
./build vX.Y.Z
```

Please note that the above operations require permission to push container images into quay.io/kubespray/.
If you don't have the permission, please ask for it on the #kubespray-dev channel.
@@ -9,7 +9,5 @@
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
atoms
mattymo
floryut
oomichi
cristicalin
103 Vagrantfile vendored
@@ -9,34 +9,32 @@ Vagrant.require_version ">= 2.0.0"

CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb')

COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json"
FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json"

# Uniq disk UUID for libvirt
DISK_UUID = Time.now.utc.to_i

SUPPORTED_OS = {
  "coreos-stable" => {box: "coreos-stable", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
  "coreos-alpha" => {box: "coreos-alpha", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
  "coreos-beta" => {box: "coreos-beta", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
  "flatcar-stable" => {box: "flatcar-stable", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["stable"]},
  "flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
  "flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
  "flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
  "ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
  "ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
  "ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
  "ubuntu2004" => {box: "geerlingguy/ubuntu2004", user: "vagrant"},
  "centos" => {box: "centos/7", user: "vagrant"},
  "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
  "centos8" => {box: "centos/8", user: "vagrant"},
  "centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
  "almalinux8" => {box: "almalinux/8", user: "vagrant"},
  "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
  "rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
  "fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
  "fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
  "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
  "fedora30" => {box: "fedora/30-cloud-base", user: "vagrant"},
  "fedora31" => {box: "fedora/31-cloud-base", user: "vagrant"},
  "opensuse" => {box: "bento/opensuse-leap-15.1", user: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
  "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
  "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
  "rhel7" => {box: "generic/rhel7", user: "vagrant"},
  "rhel8" => {box: "generic/rhel8", user: "vagrant"},
}

if File.exist?(CONFIG)
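As the `CONFIG` line above suggests, per-user overrides live in `vagrant/config.rb` or wherever `KUBESPRAY_VAGRANT_CONFIG` points. A minimal sketch; the `$`-variables mirror the defaults defined in this Vagrantfile, and the values are illustrative only:

```shell
# Hypothetical override file read by the Vagrantfile instead of the defaults.
cat > vagrant/config.rb <<'EOF'
$os = "ubuntu2004"
$num_instances = 3
$vm_memory = 4096
EOF
vagrant up --provider=libvirt
```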
@@ -48,17 +46,14 @@ $num_instances ||= 3
$instance_name_prefix ||= "k8s"
$vm_gui ||= false
$vm_memory ||= 2048
$vm_cpus ||= 2
$vm_cpus ||= 1
$shared_folders ||= {}
$forwarded_ports ||= {}
$subnet ||= "172.18.8"
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
$os ||= "ubuntu1804"
$network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
$multi_networking ||= "False"
$download_run_once ||= "True"
$download_force_cache ||= "False"
$multi_networking ||= false
# The first three nodes are etcd servers
$etcd_instances ||= $num_instances
# The first two nodes are kube masters

@@ -71,14 +66,10 @@ $kube_node_instances_with_disks_size ||= "20G"
$kube_node_instances_with_disks_number ||= 2
$override_disk_size ||= false
$disk_size ||= "20GB"
$local_path_provisioner_enabled ||= "False"
$local_path_provisioner_enabled ||= false
$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/"
$libvirt_nested ||= false
# boolean or string (e.g. "-vvv")
$ansible_verbosity ||= false
$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || ""

$playbook ||= "cluster.yml"
$playbook = "cluster.yml"

host_vars = {}

@@ -92,16 +83,16 @@ $inventory = File.absolute_path($inventory, File.dirname(__FILE__))
if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
  $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible")
  FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
  $vagrant_inventory = File.join($vagrant_ansible,"inventory")
  FileUtils.rm_f($vagrant_inventory)
  FileUtils.ln_s($inventory, $vagrant_inventory)
  if ! File.exist?(File.join($vagrant_ansible,"inventory"))
    FileUtils.ln_s($inventory, File.join($vagrant_ansible,"inventory"))
  end
end

if Vagrant.has_plugin?("vagrant-proxyconf")
  $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
  (1..$num_instances).each do |i|
    $no_proxy += ",#{$subnet}.#{i+100}"
  end
  $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
  (1..$num_instances).each do |i|
    $no_proxy += ",#{$subnet}.#{i+100}"
  end
end

Vagrant.configure("2") do |config|

@@ -151,12 +142,9 @@ Vagrant.configure("2") do |config|
      vb.gui = $vm_gui
      vb.linked_clone = true
      vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM
      vb.customize ["modifyvm", :id, "--audio", "none"]
    end

    node.vm.provider :libvirt do |lv|
      lv.nested = $libvirt_nested
      lv.cpu_mode = "host-model"
      lv.memory = $vm_memory
      lv.cpus = $vm_cpus
      lv.default_prefix = 'kubespray'

@@ -173,7 +161,7 @@ Vagrant.configure("2") do |config|
        # always make /dev/sd{a/b/c} so that CI can ensure that
        # virtualbox and libvirt will have the same devices to use for OSDs
        (1..$kube_node_instances_with_disks_number).each do |d|
          lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi"
          lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "ide"
        end
      end
    end

@@ -186,55 +174,30 @@ Vagrant.configure("2") do |config|
      node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
    end

    if ["rhel7","rhel8"].include? $os
      # Vagrant synced_folder rsync options cannot be used for RHEL boxes as Rsync package cannot
      # be installed until the host is registered with a valid Red Hat support subscription
      node.vm.synced_folder ".", "/vagrant", disabled: false
      $shared_folders.each do |src, dst|
        node.vm.synced_folder src, dst
      end
    else
      node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
      $shared_folders.each do |src, dst|
        node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
      end
    node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
    $shared_folders.each do |src, dst|
      node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
    end

    ip = "#{$subnet}.#{i+100}"
    node.vm.network :private_network, ip: ip,
      :libvirt__guest_ipv6 => 'yes',
      :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
      :libvirt__ipv6_prefix => "64",
      :libvirt__forward_mode => "none",
      :libvirt__dhcp_enabled => false
    node.vm.network :private_network, ip: ip

    # Disable swap for each vm
    node.vm.provision "shell", inline: "swapoff -a"

    # ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
    if ["ubuntu1804", "ubuntu2004"].include? $os
      node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
      node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
    end

    # Disable firewalld on oraclelinux/redhat vms
    if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
      node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
    end

    host_vars[vm_name] = {
      "ip": ip,
      "flannel_interface": "eth1",
      "kube_network_plugin": $network_plugin,
      "kube_network_plugin_multus": $multi_networking,
      "download_run_once": $download_run_once,
      "download_run_once": "True",
      "download_localhost": "False",
      "download_cache_dir": ENV['HOME'] + "/kubespray_cache",
      # Make kubespray cache even when download_run_once is false
      "download_force_cache": $download_force_cache,
      "download_force_cache": "True",
      # Keeping the cache on the nodes can improve provisioning speed while debugging kubespray
      "download_keep_remote_cache": "False",
      "docker_rpm_keepcache": "1",
      "docker_keepcache": "1",
      # These two settings will put kubectl and admin.config in $inventory/artifacts
      "kubeconfig_localhost": "True",
      "kubectl_localhost": "True",

@@ -244,11 +207,9 @@ Vagrant.configure("2") do |config|
    }

    # Only execute the Ansible provisioner once, when all the machines are up and ready.
    # And limit the action to gathering facts, the full playbook is going to be ran by testcases_run.sh
    if i == $num_instances
      node.vm.provision "ansible" do |ansible|
        ansible.playbook = $playbook
        ansible.verbose = $ansible_verbosity
        $ansible_inventory_path = File.join( $inventory, "hosts.ini")
        if File.exist?($ansible_inventory_path)
          ansible.inventory_path = $ansible_inventory_path

@@ -258,14 +219,12 @@ Vagrant.configure("2") do |config|
        ansible.host_key_checking = false
        ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
        ansible.host_vars = host_vars
        if $ansible_tags != ""
          ansible.tags = [$ansible_tags]
        end
        #ansible.tags = ['download']
        ansible.groups = {
          "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
          "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
          "kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
          "k8s_cluster:children" => ["kube_control_plane", "kube_node"],
          "kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
          "kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
          "k8s-cluster:children" => ["kube-master", "kube-node"],
        }
      end
    end
@@ -1,2 +1,2 @@
---
theme: jekyll-theme-slate
theme: jekyll-theme-slate
@@ -1,8 +1,9 @@
[ssh_connection]
pipelining=True
ansible_ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
[defaults]
strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
force_valid_group_names = ignore

@@ -10,11 +11,11 @@ host_key_checking=False
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp
fact_caching_timeout = 86400
fact_caching_timeout = 7200
stdout_callback = default
display_skipped_hosts = no
library = ./library
callbacks_enabled = profile_tasks,ara_default
callback_whitelist = profile_tasks
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
deprecation_warnings=False
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg
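A quick way to confirm which of these settings Ansible actually picked up is the stock `ansible-config` tool (a generic command, not specific to this repository):

```shell
# Prints only the options that differ from Ansible's built-in defaults.
ansible-config dump --only-changed
```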
@@ -3,31 +3,13 @@
  gather_facts: false
  become: no
  vars:
    minimal_ansible_version: 2.11.0
    maximal_ansible_version: 2.14.0
    minimal_ansible_version: 2.8.0
    ansible_connection: local
  tags: always
  tasks:
    - name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
    - name: "Check ansible version >={{ minimal_ansible_version }}"
      assert:
        msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }} exclusive"
        msg: "Ansible must be {{ minimal_ansible_version }} or higher"
        that:
          - ansible_version.string is version(minimal_ansible_version, ">=")
          - ansible_version.string is version(maximal_ansible_version, "<")
      tags:
        - check

    - name: "Check that python netaddr is installed"
      assert:
        msg: "Python netaddr is not present"
        that: "'127.0.0.1' | ipaddr"
      tags:
        - check

    # CentOS 7 provides too old jinja version
    - name: "Check that jinja is not too old (install via pip)"
      assert:
        msg: "Your Jinja version is too old, install via pip"
        that: "{% set test %}It works{% endset %}{{ test == 'It works' }}"
      tags:
        - check
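To satisfy the version window asserted above on the control node, one might pin the install with pip. The bounds are copied from the play's variables; the exact package names are an assumption:

```shell
# ansible-core carries the version that the play's ansible_version check sees.
python3 -m pip install 'ansible-core>=2.11.0,<2.14.0' netaddr 'jinja2>=2.11'
```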
76 cluster.yml
@@ -2,43 +2,50 @@
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml
- hosts: all
  gather_facts: false
  tasks:
    - name: "Set up proxy environment"
      set_fact:
        proxy_env:
          http_proxy: "{{ http_proxy | default ('') }}"
          HTTP_PROXY: "{{ http_proxy | default ('') }}"
          https_proxy: "{{ https_proxy | default ('') }}"
          HTTPS_PROXY: "{{ https_proxy | default ('') }}"
          no_proxy: "{{ no_proxy | default ('') }}"
          NO_PROXY: "{{ no_proxy | default ('') }}"
      no_log: true
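If those proxy facts are needed, they are typically fed in as inventory variables or extra vars; an illustrative invocation (the proxy address and inventory path are placeholders):

```shell
ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml \
  -e http_proxy=http://proxy.example.com:3128 \
  -e https_proxy=http://proxy.example.com:3128 \
  -e no_proxy=localhost,127.0.0.1
```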
- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- hosts: k8s_cluster:etcd
- hosts: k8s-cluster:etcd
  strategy: linear
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bootstrap-os, tags: bootstrap-os}

- name: Gather facts
  tags: always
  import_playbook: facts.yml

- hosts: k8s_cluster:etcd
- hosts: k8s-cluster:etcd
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
    - { role: download, tags: download, when: "not skip_downloads" }
  environment: "{{ proxy_env }}"

- hosts: etcd:kube_control_plane
- hosts: etcd
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd

@@ -46,12 +53,11 @@
      vars:
        etcd_cluster_setup: true
        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
      when: etcd_deployment_type != "kubeadm"
      when: not etcd_kubeadm_enabled| default(false)

- hosts: k8s_cluster
- hosts: k8s-cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd

@@ -59,59 +65,52 @@
      vars:
        etcd_cluster_setup: false
        etcd_events_cluster_setup: false
      when:
        - etcd_deployment_type != "kubeadm"
        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
        - kube_network_plugin != "calico" or calico_datastore == "etcd"
      when: not etcd_kubeadm_enabled| default(false)

- hosts: k8s_cluster
- hosts: k8s-cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/node, tags: node }
  environment: "{{ proxy_env }}"

- hosts: kube_control_plane
- hosts: kube-master
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/control-plane, tags: master }
    - { role: kubernetes/master, tags: master }
    - { role: kubernetes/client, tags: client }
    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }

- hosts: k8s_cluster
- hosts: k8s-cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/kubeadm, tags: kubeadm}
    - { role: kubernetes/node-label, tags: node-label }
    - { role: network_plugin, tags: network }
    - { role: kubernetes/node-label, tags: node-label }

- hosts: calico_rr
- hosts: calico-rr
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }

- hosts: kube_control_plane[0]
- hosts: kube-master[0]
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }

- hosts: kube_control_plane
- hosts: kube-master
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }

@@ -119,13 +118,18 @@
    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
    - { role: kubernetes-apps, tags: apps }

- name: Apply resolv.conf changes now that cluster DNS is up
  hosts: k8s_cluster
- hosts: kube-master
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps, tags: apps }
  environment: "{{ proxy_env }}"

- hosts: k8s-cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
@@ -35,7 +35,7 @@ class SearchEC2Tags(object):
    hosts['_meta'] = { 'hostvars': {} }

    ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
    for group in ["kube_control_plane", "kube_node", "etcd"]:
    for group in ["kube-master", "kube-node", "etcd"]:
      hosts[group] = []
      tag_key = "kubespray-role"
      tag_value = ["*"+group+"*"]

@@ -69,8 +69,8 @@ class SearchEC2Tags(object):

      hosts[group].append(dns_name)
      hosts['_meta']['hostvars'][dns_name] = ansible_host

    hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']}
    hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
    print(json.dumps(hosts, sort_keys=True, indent=2))

SearchEC2Tags()
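Since the lookup relies on the `kubespray-role` tag, instances must carry it before this script can find them; an illustrative AWS CLI call (the instance ID is a placeholder):

```shell
# Tag an instance so SearchEC2Tags lists it under the new kube_control_plane group.
aws ec2 create-tags --resources i-0123456789abcdef0 \
  --tags Key=kubespray-role,Value=kube_control_plane
```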
@@ -1 +0,0 @@
boto3 # Apache-2.0
@@ -24,14 +24,14 @@ experience.

You can enable the use of a Bastion Host by changing **use_bastion** in group_vars/all to **true**. The generated
templates will then include an additional bastion VM which can then be used to connect to the masters and nodes. The option
also removes all public IPs from all other VMs.
also removes all public IPs from all other VMs.

## Generating and applying

To generate and apply the templates, call:

```shell
./apply-rg.sh <resource_group_name>
$ ./apply-rg.sh <resource_group_name>
```

If you change something in the configuration (e.g. number of nodes) later, you can call this again and Azure will

@@ -42,26 +42,24 @@ take care about creating/modifying whatever is needed.

If you need to delete all resources from a resource group, simply call:

```shell
./clear-rg.sh <resource_group_name>
$ ./clear-rg.sh <resource_group_name>
```

**WARNING** this really deletes everything from your resource group, including everything that was later created by you!

## Installing Ansible and the dependencies

Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)

## Generating an inventory for kubespray

After you have applied the templates, you can generate an inventory with this call:

```shell
./generate-inventory.sh <resource_group_name>
$ ./generate-inventory.sh <resource_group_name>
```

It will create the file ./inventory which can then be used with kubespray, e.g.:

```shell
cd kubespray-root-dir
ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
$ cd kubespray-root-dir
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
```
@@ -9,11 +9,18 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
  exit 1
fi

ansible-playbook generate-templates.yml

az deployment group create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
if az &>/dev/null; then
  echo "azure cli 2.0 found, using it instead of 1.0"
  ./apply-rg_2.sh "$AZURE_RESOURCE_GROUP"
elif azure &>/dev/null; then
  ansible-playbook generate-templates.yml

  azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
  azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
  azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
  azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
  azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
  azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
else
  echo "Azure cli not found"
fi
19 contrib/azurerm/apply-rg_2.sh Executable file

@@ -0,0 +1,19 @@
#!/usr/bin/env bash

set -e

AZURE_RESOURCE_GROUP="$1"

if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
  echo "AZURE_RESOURCE_GROUP is missing"
  exit 1
fi

ansible-playbook generate-templates.yml

az deployment group create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
@@ -9,6 +9,10 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
  exit 1
fi

ansible-playbook generate-templates.yml

az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete
if az &>/dev/null; then
  echo "azure cli 2.0 found, using it instead of 1.0"
  ./clear-rg_2.sh "$AZURE_RESOURCE_GROUP"
else
  ansible-playbook generate-templates.yml
  azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
fi
14 contrib/azurerm/clear-rg_2.sh Executable file

@@ -0,0 +1,14 @@
#!/usr/bin/env bash

set -e

AZURE_RESOURCE_GROUP="$1"

if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
  echo "AZURE_RESOURCE_GROUP is missing"
  exit 1
fi

ansible-playbook generate-templates.yml

az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete
@@ -1,6 +1,6 @@
---

- name: Query Azure VMs # noqa 301
- name: Query Azure VMs
  command: azure vm list-ip-address --json {{ azure_resource_group }}
  register: vm_list_cmd

@@ -12,4 +12,3 @@
  template:
    src: inventory.j2
    dest: "{{ playbook_dir }}/inventory"
    mode: 0644
@@ -7,9 +7,9 @@
{% endif %}
{% endfor %}

[kube_control_plane]
[kube-master]
{% for vm in vm_list %}
{% if 'kube_control_plane' in vm.tags.roles %}
{% if 'kube-master' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

@@ -21,13 +21,13 @@
{% endif %}
{% endfor %}

[kube_node]
[kube-node]
{% for vm in vm_list %}
{% if 'kube_node' in vm.tags.roles %}
{% if 'kube-node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

[k8s_cluster:children]
kube_node
kube_control_plane
[k8s-cluster:children]
kube-node
kube-master
@@ -1,14 +1,14 @@
---

- name: Query Azure VMs IPs # noqa 301
- name: Query Azure VMs IPs
  command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
  register: vm_ip_list_cmd

- name: Query Azure VMs Roles # noqa 301
- name: Query Azure VMs Roles
  command: az vm list -o json --resource-group {{ azure_resource_group }}
  register: vm_list_cmd

- name: Query Azure Load Balancer Public IP # noqa 301
- name: Query Azure Load Balancer Public IP
  command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
  register: lb_pubip_cmd

@@ -22,10 +22,8 @@
  template:
    src: inventory.j2
    dest: "{{ playbook_dir }}/inventory"
    mode: 0644

- name: Generate Load Balancer variables
  template:
    src: loadbalancer_vars.j2
    dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
    mode: 0644
@@ -7,9 +7,9 @@
{% endif %}
{% endfor %}

[kube_control_plane]
[kube-master]
{% for vm in vm_roles_list %}
{% if 'kube_control_plane' in vm.tags.roles %}
{% if 'kube-master' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

@@ -21,14 +21,14 @@
{% endif %}
{% endfor %}

[kube_node]
[kube-node]
{% for vm in vm_roles_list %}
{% if 'kube_node' in vm.tags.roles %}
{% if 'kube-node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

[k8s_cluster:children]
kube_node
kube_control_plane
[k8s-cluster:children]
kube-node
kube-master
@@ -8,13 +8,11 @@
    path: "{{ base_dir }}"
    state: directory
    recurse: true
    mode: 0755

- name: Store json files in base_dir
  template:
    src: "{{ item }}"
    dest: "{{ base_dir }}/{{ item }}"
    mode: 0644
  with_items:
    - network.json
    - storage.json
@@ -144,7 +144,7 @@
        "[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
      ],
      "tags": {
        "roles": "kube_control_plane,etcd"
        "roles": "kube-master,etcd"
      },
      "apiVersion": "{{apiVersion}}",
      "properties": {
@@ -61,7 +61,7 @@
        "[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
      ],
      "tags": {
        "roles": "kube_node"
        "roles": "kube-node"
      },
      "apiVersion": "{{apiVersion}}",
      "properties": {

@@ -112,4 +112,4 @@
    } {% if not loop.last %},{% endif %}
    {% endfor %}
  ]
}
}
@@ -6,7 +6,6 @@ to serve as Kubernetes "nodes", which in turn will run
called DIND (Docker-IN-Docker).

The playbook has two roles:

- dind-host: creates the "nodes" as containers in localhost, with
  appropriate settings for DIND (privileged, volume mapping for dind
  storage, etc).

@@ -28,7 +27,7 @@ See below for a complete successful run:

1. Create the node containers

```shell
~~~~
# From the kubespray root dir
cd contrib/dind
pip install -r requirements.txt

@@ -37,15 +36,15 @@ ansible-playbook -i hosts dind-cluster.yaml

# Back to kubespray root
cd ../..
```
~~~~

NOTE: if the playbook run fails with something like the below error
message, you may need to specifically set `ansible_python_interpreter`;
see the `./hosts` file for an example expanded localhost entry.

```shell
~~~
failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"}
```
~~~

2. Customize kubespray-dind.yaml

@@ -53,33 +52,33 @@ Note that there's coupling between above created node containers
and `kubespray-dind.yaml` settings, in particular regarding selected `node_distro`
(as set in `group_vars/all/all.yaml`), and docker settings.

```shell
~~~
$EDITOR contrib/dind/kubespray-dind.yaml
```
~~~

3. Prepare the inventory and run the playbook

```shell
~~~
INVENTORY_DIR=inventory/local-dind
mkdir -p ${INVENTORY_DIR}
rm -f ${INVENTORY_DIR}/hosts.ini
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh

ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml
```
~~~

NOTE: You could also test other distros without editing files by
passing `--extra-vars` as per the below command line,
replacing `DISTRO` by either `debian`, `ubuntu`, `centos`, `fedora`:

```shell
~~~
cd contrib/dind
ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO

cd ../..
CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO
```
~~~

## Resulting deployment

@@ -90,7 +89,7 @@ from the host where you ran kubespray playbooks.

Running from an Ubuntu Xenial host:

```shell
~~~
$ uname -a
Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24
15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux

@@ -150,14 +149,14 @@ kube-system weave-net-xr46t 2/2 Running 0

$ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check
{"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null}
```
~~~

## Using ./run-test-distros.sh

You can use `./run-test-distros.sh` to run a set of tests via DIND;
an excerpt from this script, to get an idea:

```shell
~~~
# The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos)
# EXTRAS=(

@@ -170,7 +169,7 @@ and excerpt from this script, to get an idea:
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to main kubespray ansible-playbook run.
```
~~~

See e.g. `test-some_distros-most_CNIs.env` and
`test-some_distros-kube_router_combo.env` in particular for a richer
@@ -35,7 +35,6 @@
          path-exclude=/usr/share/doc/*
          path-include=/usr/share/doc/*/copyright
        dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
        mode: 0644
      when:
        - ansible_os_family == 'Debian'

@@ -64,7 +63,6 @@
      copy:
        content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
        dest: "/etc/sudoers.d/{{ distro_user }}"
        mode: 0640

    - name: Add my pubkey to "{{ distro_user }}" user authorized keys
      authorized_key:
@@ -69,7 +69,7 @@

# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
# handle manually
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
  raw: |
    echo {{ item | hash('sha1') }} > /etc/machine-id.new
    mv -b /etc/machine-id.new /etc/machine-id
@@ -17,7 +17,7 @@ pass_or_fail() {
test_distro() {
  local distro=${1:?};shift
  local extra="${*:-}"
  local prefix="${distro[${extra}]}"
  local prefix="$distro[${extra}]}"
  ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
  pass_or_fail "$prefix: dind-nodes" || return 1
  (cd ../..

@@ -46,7 +46,7 @@ test_distro() {
  pass_or_fail "$prefix: netcheck" || return 1
}

NODES=($(egrep ^kube_node hosts))
NODES=($(egrep ^kube-node hosts))
NETCHECKER_HOST=localhost

: ${OUTPUT_DIR:=./out}

@@ -71,15 +71,15 @@ for spec in ${SPECS}; do
  echo "Loading file=${spec} ..."
  . ${spec} || continue
  : ${DISTROS:?} || continue
  echo "DISTROS:" "${DISTROS[@]}"
  echo "DISTROS=${DISTROS[@]}"
  echo "EXTRAS->"
  printf "  %s\n" "${EXTRAS[@]}"
  let n=1
  for distro in "${DISTROS[@]}"; do
  for distro in ${DISTROS[@]}; do
    for extra in "${EXTRAS[@]:-NULL}"; do
      # Magic value to let this for run once:
      [[ ${extra} == NULL ]] && unset extra
      docker rm -f "${NODES[@]}"
      docker rm -f ${NODES[@]}
      printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
      {
        info "${distro}[${extra}] START: file_out=${file_out}"
@ -41,14 +41,13 @@ from ruamel.yaml import YAML
|
|||
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster',
|
||||
'calico_rr']
|
||||
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
|
||||
'calico-rr']
|
||||
PROTECTED_NAMES = ROLES
|
||||
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
|
||||
'load', 'add']
|
||||
'load']
|
||||
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
|
||||
'0': False, 'no': False, 'false': False, 'off': False}
|
||||
yaml = YAML()
|
||||
|
@ -63,16 +62,13 @@ def get_var_as_bool(name, default):
|
|||
|
||||
|
||||
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
|
||||
# Remove the reference of KUBE_MASTERS after some deprecation cycles.
|
||||
KUBE_CONTROL_HOSTS = int(os.environ.get("KUBE_CONTROL_HOSTS",
|
||||
os.environ.get("KUBE_MASTERS", 2)))
|
||||
KUBE_MASTERS = int(os.environ.get("KUBE_MASTERS_MASTERS", 2))
|
||||
# Reconfigures cluster distribution at scale
|
||||
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
|
||||
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200))
|
||||
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
|
||||
|
||||
DEBUG = get_var_as_bool("DEBUG", True)
|
||||
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
|
||||
USE_REAL_HOSTNAME = get_var_as_bool("USE_REAL_HOSTNAME", False)
|
||||
|
||||
# Configurable as shell vars end
|
||||
|
||||
|
@ -82,54 +78,32 @@ class KubesprayInventory(object):
|
|||
def __init__(self, changed_hosts=None, config_file=None):
|
||||
self.config_file = config_file
|
||||
self.yaml_config = {}
|
||||
loadPreviousConfig = False
|
||||
printHostnames = False
|
||||
# See whether there are any commands to process
|
||||
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
|
||||
if changed_hosts[0] == "add":
|
||||
loadPreviousConfig = True
|
||||
changed_hosts = changed_hosts[1:]
|
||||
elif changed_hosts[0] == "print_hostnames":
|
||||
loadPreviousConfig = True
|
||||
printHostnames = True
|
||||
else:
|
||||
self.parse_command(changed_hosts[0], changed_hosts[1:])
|
||||
sys.exit(0)
|
||||
|
||||
# If the user wants to remove a node, we need to load the config anyway
|
||||
if changed_hosts and changed_hosts[0][0] == "-":
|
||||
loadPreviousConfig = True
|
||||
|
||||
if self.config_file and loadPreviousConfig: # Load previous YAML file
|
||||
if self.config_file:
|
||||
try:
|
||||
self.hosts_file = open(config_file, 'r')
|
||||
self.yaml_config = yaml.load(self.hosts_file)
|
||||
except OSError as e:
|
||||
# I am assuming we are catching "cannot open file" exceptions
|
||||
print(e)
|
||||
sys.exit(1)
|
||||
self.yaml_config = yaml.load_all(self.hosts_file)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
if printHostnames:
|
||||
self.print_hostnames()
|
||||
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
|
||||
self.parse_command(changed_hosts[0], changed_hosts[1:])
|
||||
sys.exit(0)
|
||||
|
||||
self.ensure_required_groups(ROLES)
|
||||
|
||||
if changed_hosts:
|
||||
changed_hosts = self.range2ips(changed_hosts)
|
||||
self.hosts = self.build_hostnames(changed_hosts,
|
||||
loadPreviousConfig)
|
||||
self.hosts = self.build_hostnames(changed_hosts)
|
||||
self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
|
||||
self.set_all(self.hosts)
|
||||
self.set_k8s_cluster()
|
||||
etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
|
||||
self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
|
||||
if len(self.hosts) >= SCALE_THRESHOLD:
|
||||
self.set_kube_control_plane(list(self.hosts.keys())[
|
||||
etcd_hosts_count:(etcd_hosts_count + KUBE_CONTROL_HOSTS)])
|
||||
self.set_kube_master(list(self.hosts.keys())[
|
||||
etcd_hosts_count:(etcd_hosts_count + KUBE_MASTERS)])
|
||||
else:
|
||||
self.set_kube_control_plane(
|
||||
list(self.hosts.keys())[:KUBE_CONTROL_HOSTS])
|
||||
self.set_kube_master(list(self.hosts.keys())[:KUBE_MASTERS])
|
||||
self.set_kube_node(self.hosts.keys())
|
||||
if len(self.hosts) >= SCALE_THRESHOLD:
|
||||
self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])
|
||||
|
@ -179,37 +153,23 @@ class KubesprayInventory(object):
|
|||
except IndexError:
|
||||
raise ValueError("Host name must end in an integer")
|
||||
|
||||
# Keeps already specified hosts,
|
||||
# and adds or removes the hosts provided as an argument
|
||||
def build_hostnames(self, changed_hosts, loadPreviousConfig=False):
|
||||
def build_hostnames(self, changed_hosts):
|
||||
existing_hosts = OrderedDict()
|
||||
highest_host_id = 0
|
||||
# Load already existing hosts from the YAML
|
||||
if loadPreviousConfig:
|
||||
try:
|
||||
for host in self.yaml_config['all']['hosts']:
|
||||
# Read configuration of an existing host
|
||||
hostConfig = self.yaml_config['all']['hosts'][host]
|
||||
existing_hosts[host] = hostConfig
|
||||
# If the existing host seems
|
||||
# to have been created automatically, detect its ID
|
||||
if host.startswith(HOST_PREFIX):
|
||||
host_id = self.get_host_id(host)
|
||||
if host_id > highest_host_id:
|
||||
highest_host_id = host_id
|
||||
except Exception as e:
|
||||
# I am assuming we are catching automatically
|
||||
# created hosts without IDs
|
||||
print(e)
|
||||
sys.exit(1)
|
||||
try:
|
||||
for host in self.yaml_config['all']['hosts']:
|
||||
existing_hosts[host] = self.yaml_config['all']['hosts'][host]
|
||||
host_id = self.get_host_id(host)
|
||||
if host_id > highest_host_id:
|
||||
highest_host_id = host_id
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# FIXME(mattymo): Fix condition where delete then add reuses highest id
|
||||
next_host_id = highest_host_id + 1
|
||||
next_host = ""
|
||||
|
||||
all_hosts = existing_hosts.copy()
|
||||
for host in changed_hosts:
|
||||
# Delete the host from config the hostname/IP has a "-" prefix
|
||||
if host[0] == "-":
|
||||
realhost = host[1:]
|
||||
if self.exists_hostname(all_hosts, realhost):
|
||||
|
@ -218,8 +178,6 @@ class KubesprayInventory(object):
|
|||
elif self.exists_ip(all_hosts, realhost):
|
||||
self.debug("Marked {0} for deletion.".format(realhost))
|
||||
self.delete_host_by_ip(all_hosts, realhost)
|
||||
# Host/Argument starts with a digit,
|
||||
# then we assume its an IP address
|
||||
elif host[0].isdigit():
|
||||
if ',' in host:
|
||||
ip, access_ip = host.split(',')
|
||||
|
@ -233,21 +191,11 @@ class KubesprayInventory(object):
|
|||
self.debug("Skipping existing host {0}.".format(ip))
|
||||
continue
|
||||
|
||||
if USE_REAL_HOSTNAME:
|
||||
cmd = ("ssh -oStrictHostKeyChecking=no "
|
||||
+ access_ip + " 'hostname -s'")
|
||||
next_host = subprocess.check_output(cmd, shell=True)
|
||||
next_host = next_host.strip().decode('ascii')
|
||||
else:
|
||||
# Generates a hostname because we have only an IP address
|
||||
next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
|
||||
next_host_id += 1
|
||||
# Uses automatically generated node name
|
||||
# in case we dont provide it.
|
||||
next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
|
||||
next_host_id += 1
|
||||
all_hosts[next_host] = {'ansible_host': access_ip,
|
||||
'ip': ip,
|
||||
'access_ip': access_ip}
|
||||
# Host/Argument starts with a letter, then we assume its a hostname
|
||||
elif host[0].isalpha():
|
||||
if ',' in host:
|
||||
try:
|
||||
|
@ -266,7 +214,6 @@ class KubesprayInventory(object):
|
|||
'access_ip': access_ip}
|
||||
return all_hosts
|
||||
|
||||
# Expand IP ranges into individual addresses
|
||||
def range2ips(self, hosts):
|
||||
reworked_hosts = []
|
||||
|
||||
|
@ -282,7 +229,7 @@ class KubesprayInventory(object):
|
|||
return [ip_address(ip).exploded for ip in range(start, end + 1)]
|
||||
|
||||
for host in hosts:
|
||||
if '-' in host and not (host.startswith('-') or host[0].isalpha()):
|
||||
if '-' in host and not host.startswith('-'):
|
||||
start, end = host.strip().split('-')
|
||||
try:
|
||||
reworked_hosts.extend(ips(start, end))
|
||||
|
@ -310,7 +257,7 @@ class KubesprayInventory(object):
|
|||
|
||||
def purge_invalid_hosts(self, hostnames, protected_names=[]):
|
||||
for role in self.yaml_config['all']['children']:
|
||||
if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']: # noqa
|
||||
if role != 'k8s-cluster' and self.yaml_config['all']['children'][role]['hosts']: # noqa
|
||||
all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy() # noqa
|
||||
for host in all_hosts.keys():
|
||||
if host not in hostnames and host not in protected_names:
|
||||
|
@ -331,54 +278,52 @@ class KubesprayInventory(object):
|
|||
if self.yaml_config['all']['hosts'] is None:
|
||||
self.yaml_config['all']['hosts'] = {host: None}
|
||||
self.yaml_config['all']['hosts'][host] = opts
|
||||
elif group != 'k8s_cluster:children':
|
||||
elif group != 'k8s-cluster:children':
|
||||
if self.yaml_config['all']['children'][group]['hosts'] is None:
|
||||
self.yaml_config['all']['children'][group]['hosts'] = {
|
||||
host: None}
|
||||
else:
|
||||
self.yaml_config['all']['children'][group]['hosts'][host] = None # noqa
|
||||
|
||||
def set_kube_control_plane(self, hosts):
|
||||
def set_kube_master(self, hosts):
|
||||
for host in hosts:
|
||||
self.add_host_to_group('kube_control_plane', host)
|
||||
self.add_host_to_group('kube-master', host)
|
||||
|
||||
def set_all(self, hosts):
|
||||
for host, opts in hosts.items():
|
||||
self.add_host_to_group('all', host, opts)
|
||||
|
||||
def set_k8s_cluster(self):
|
||||
k8s_cluster = {'children': {'kube_control_plane': None,
|
||||
'kube_node': None}}
|
||||
self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster
|
||||
k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
|
||||
self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster
|
||||
|
||||
def set_calico_rr(self, hosts):
|
||||
for host in hosts:
|
||||
if host in self.yaml_config['all']['children']['kube_control_plane']: # noqa
|
||||
self.debug("Not adding {0} to calico_rr group because it "
|
||||
"conflicts with kube_control_plane "
|
||||
"group".format(host))
|
||||
if host in self.yaml_config['all']['children']['kube-master']:
|
||||
self.debug("Not adding {0} to calico-rr group because it "
|
||||
"conflicts with kube-master group".format(host))
|
||||
continue
|
||||
if host in self.yaml_config['all']['children']['kube_node']:
|
||||
self.debug("Not adding {0} to calico_rr group because it "
|
||||
"conflicts with kube_node group".format(host))
|
||||
if host in self.yaml_config['all']['children']['kube-node']:
|
||||
self.debug("Not adding {0} to calico-rr group because it "
|
||||
"conflicts with kube-node group".format(host))
|
||||
continue
|
||||
self.add_host_to_group('calico_rr', host)
|
||||
self.add_host_to_group('calico-rr', host)
|
||||
|
||||
def set_kube_node(self, hosts):
|
||||
for host in hosts:
|
||||
if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
|
||||
if host in self.yaml_config['all']['children']['etcd']['hosts']: # noqa
|
||||
self.debug("Not adding {0} to kube_node group because of "
|
||||
self.debug("Not adding {0} to kube-node group because of "
|
||||
"scale deployment and host is in etcd "
|
||||
"group.".format(host))
|
||||
continue
|
||||
if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD: # noqa
|
||||
if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']: # noqa
|
||||
self.debug("Not adding {0} to kube_node group because of "
|
||||
"scale deployment and host is in "
|
||||
"kube_control_plane group.".format(host))
|
||||
if host in self.yaml_config['all']['children']['kube-master']['hosts']: # noqa
|
||||
self.debug("Not adding {0} to kube-node group because of "
|
||||
"scale deployment and host is in kube-master "
|
||||
"group.".format(host))
|
||||
continue
|
||||
self.add_host_to_group('kube_node', host)
|
||||
self.add_host_to_group('kube-node', host)
|
||||
|
||||
def set_etcd(self, hosts):
|
||||
for host in hosts:
|
||||
|
@@ -435,11 +380,9 @@ help - Display this message
print_cfg - Write inventory file to stdout
print_ips - Write a space-delimited list of IPs from "all" group
print_hostnames - Write a space-delimited list of Hostnames from "all" group
add - Adds specified hosts into an already existing inventory

Advanced usage:
Create new or overwrite old inventory file: inventory.py 10.10.1.5
Add another host after initial creation: inventory.py add 10.10.1.6
Add another host after initial creation: inventory.py 10.10.1.5
Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3
Add hosts with a specific hostname, ip, and optional access ip: first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
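For orientation, here is a sketch of the `hosts.yaml` entry such an `ip,access_ip` pair produces, inferred from the expected values in the test suite further down in this diff (the exact layout may differ between the two branches):

```yaml
all:
  hosts:
    node1:
      ansible_host: 192.168.10.1   # the access_ip is also used as ansible_host
      ip: 10.0.0.1
      access_ip: 192.168.10.1
```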
@@ -450,9 +393,8 @@ Configurable env vars:
DEBUG                   Enable debug printing. Default: True
CONFIG_FILE             File to write config to Default: ./inventory/sample/hosts.yaml
HOST_PREFIX             Host prefix for generated hosts. Default: node
KUBE_CONTROL_HOSTS      Set the number of kube-control-planes. Default: 2
SCALE_THRESHOLD         Separate ETCD role if # of nodes >= 50
MASSIVE_SCALE_THRESHOLD Separate K8s control-plane and ETCD if # of nodes >= 200
MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
'''  # noqa
print(help_text)
@@ -473,7 +415,6 @@ def main(argv=None):
    if not argv:
        argv = sys.argv[1:]
    KubesprayInventory(argv, CONFIG_FILE)
    return 0


if __name__ == "__main__":
@@ -13,9 +13,8 @@
# under the License.

import inventory
from io import StringIO
import mock
import unittest
from unittest import mock

from collections import OrderedDict
import sys
@@ -27,28 +26,6 @@ if path not in sys.path:
import inventory  # noqa


class TestInventoryPrintHostnames(unittest.TestCase):

    @mock.patch('ruamel.yaml.YAML.load')
    def test_print_hostnames(self, load_mock):
        mock_io = mock.mock_open(read_data='')
        load_mock.return_value = OrderedDict({'all': {'hosts': {
            'node1': {'ansible_host': '10.90.0.2',
                      'ip': '10.90.0.2',
                      'access_ip': '10.90.0.2'},
            'node2': {'ansible_host': '10.90.0.3',
                      'ip': '10.90.0.3',
                      'access_ip': '10.90.0.3'}}}})
        with mock.patch('builtins.open', mock_io):
            with self.assertRaises(SystemExit) as cm:
                with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
                    inventory.KubesprayInventory(
                        changed_hosts=["print_hostnames"],
                        config_file="file")
        self.assertEqual("node1 node2\n", stdout.getvalue())
        self.assertEqual(cm.exception.code, 0)


class TestInventory(unittest.TestCase):
    @mock.patch('inventory.sys')
    def setUp(self, sys_mock):
@@ -90,14 +67,23 @@ class TestInventory(unittest.TestCase):
        self.assertRaisesRegex(ValueError, "Host name must end in an",
                               self.inv.get_host_id, hostname)

    def test_build_hostnames_add_one(self):
        changed_hosts = ['10.90.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '10.90.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '10.90.0.2'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_duplicate(self):
        changed_hosts = ['10.90.0.2']
        expected = OrderedDict([('node3',
        expected = OrderedDict([('node1',
                                 {'ansible_host': '10.90.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '10.90.0.2'})])
        self.inv.yaml_config['all']['hosts'] = expected
        result = self.inv.build_hostnames(changed_hosts, True)
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two(self):
@@ -113,30 +99,6 @@ class TestInventory(unittest.TestCase):
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_three(self):
        changed_hosts = ['10.90.0.2', '10.90.0.3', '10.90.0.4']
        expected = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'}),
            ('node3', {'ansible_host': '10.90.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '10.90.0.4'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_one(self):
        changed_hosts = ['10.90.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '10.90.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '10.90.0.2'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_delete_first(self):
        changed_hosts = ['-10.90.0.2']
        existing_hosts = OrderedDict([
@@ -151,24 +113,7 @@ class TestInventory(unittest.TestCase):
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_delete_by_hostname(self):
        changed_hosts = ['-node1']
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        self.inv.yaml_config['all']['hosts'] = existing_hosts
        expected = OrderedDict([
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.build_hostnames(changed_hosts, True)
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_exists_hostname_positive(self):
@@ -277,11 +222,11 @@ class TestInventory(unittest.TestCase):
            self.inv.yaml_config['all']['children'][group]['hosts'].get(host),
            None)

    def test_set_kube_control_plane(self):
        group = 'kube_control_plane'
    def test_set_kube_master(self):
        group = 'kube-master'
        host = 'node1'

        self.inv.set_kube_control_plane([host])
        self.inv.set_kube_master([host])
        self.assertIn(
            host, self.inv.yaml_config['all']['children'][group]['hosts'])

@@ -296,8 +241,8 @@ class TestInventory(unittest.TestCase):
            self.inv.yaml_config['all']['hosts'].get(host), opt)

    def test_set_k8s_cluster(self):
        group = 'k8s_cluster'
        expected_hosts = ['kube_node', 'kube_control_plane']
        group = 'k8s-cluster'
        expected_hosts = ['kube-node', 'kube-master']

        self.inv.set_k8s_cluster()
        for host in expected_hosts:
@@ -306,7 +251,7 @@ class TestInventory(unittest.TestCase):
                self.inv.yaml_config['all']['children'][group]['children'])

    def test_set_kube_node(self):
        group = 'kube_node'
        group = 'kube-node'
        host = 'node1'

        self.inv.set_kube_node([host])
@@ -330,12 +275,12 @@ class TestInventory(unittest.TestCase):

        self.inv.set_all(hosts)
        self.inv.set_etcd(list(hosts.keys())[0:3])
        self.inv.set_kube_control_plane(list(hosts.keys())[0:2])
        self.inv.set_kube_master(list(hosts.keys())[0:2])
        self.inv.set_kube_node(hosts.keys())
        for h in range(3):
            self.assertFalse(
                list(hosts.keys())[h] in
                self.inv.yaml_config['all']['children']['kube_node']['hosts'])
                self.inv.yaml_config['all']['children']['kube-node']['hosts'])

    def test_scale_scenario_two(self):
        num_nodes = 500
@@ -346,12 +291,12 @@ class TestInventory(unittest.TestCase):

        self.inv.set_all(hosts)
        self.inv.set_etcd(list(hosts.keys())[0:3])
        self.inv.set_kube_control_plane(list(hosts.keys())[3:5])
        self.inv.set_kube_master(list(hosts.keys())[3:5])
        self.inv.set_kube_node(hosts.keys())
        for h in range(5):
            self.assertFalse(
                list(hosts.keys())[h] in
                self.inv.yaml_config['all']['children']['kube_node']['hosts'])
                self.inv.yaml_config['all']['children']['kube-node']['hosts'])

    def test_range2ips_range(self):
        changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
@@ -368,7 +313,7 @@ class TestInventory(unittest.TestCase):
        self.assertRaisesRegex(Exception, "Range of ip_addresses isn't valid",
                               self.inv.range2ips, host_range)

    def test_build_hostnames_create_with_one_different_ips(self):
    def test_build_hostnames_different_ips_add_one(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
@@ -377,7 +322,17 @@ class TestInventory(unittest.TestCase):
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_create_with_two_different_ips(self):
    def test_build_hostnames_different_ips_add_duplicate(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        self.inv.yaml_config['all']['hosts'] = expected
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_different_ips_add_two(self):
        changed_hosts = ['10.90.0.2,192.168.0.2', '10.90.0.3,192.168.0.3']
        expected = OrderedDict([
            ('node1', {'ansible_host': '192.168.0.2',
@@ -386,210 +341,6 @@ class TestInventory(unittest.TestCase):
            ('node2', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'})])
        self.inv.yaml_config['all']['hosts'] = OrderedDict()
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_create_with_three_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2',
                         '10.90.0.3,192.168.0.3',
                         '10.90.0.4,192.168.0.4']
        expected = OrderedDict([
            ('node1', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node2', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node3', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_overwrite_one_with_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        existing = OrderedDict([('node5',
                                 {'ansible_host': '192.168.0.5',
                                  'ip': '10.90.0.5',
                                  'access_ip': '192.168.0.5'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_overwrite_three_with_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        existing = OrderedDict([
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_different_ips_add_duplicate(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node3',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        existing = expected
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two_different_ips_into_one_existing(self):
        changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two_different_ips_into_two_existing(self):
        changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two_different_ips_into_three_existing(self):
        changed_hosts = ['10.90.0.5,192.168.0.5', '10.90.0.6,192.168.0.6']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'}),
            ('node6', {'ansible_host': '192.168.0.6',
                       'ip': '10.90.0.6',
                       'access_ip': '192.168.0.6'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    # Add two IP addresses into a config that has
    # three already defined IP addresses. One of the IP addresses
    # is a duplicate.
    def test_build_hostnames_add_two_duplicate_one_overlap(self):
        changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    # Add two duplicate IP addresses into a config that has
    # three already defined IP addresses
    def test_build_hostnames_add_two_duplicate_two_overlap(self):
        changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)
@@ -5,7 +5,7 @@ deployment on VMs.

This playbook does not create Virtual Machines, nor does it run Kubespray itself.

## User creation
### User creation

If you want to create a user for running Kubespray deployment, you should specify
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
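A minimal sketch of how these two variables might be set in inventory group_vars; the variable names come from this README, while the values are purely illustrative:

```yaml
k8s_deployment_user: kubedeploy               # hypothetical user name
k8s_deployment_user_pkey_path: /tmp/ssh_key   # hypothetical path to the key file for that user
```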
@@ -1,7 +1,7 @@
---

- name: Install required packages
  package:
  yum:
    name: "{{ item }}"
    state: present
  with_items:
@@ -28,7 +28,7 @@
  sysctl:
    name: net.ipv4.ip_forward
    value: 1
    sysctl_file: "{{ sysctl_file_path }}"
    sysctl_file: /etc/sysctl.d/ipv4-ip_forward.conf
    state: present
    reload: yes

@@ -37,7 +37,7 @@
    name: "{{ item }}"
    state: present
    value: 0
    sysctl_file: "{{ sysctl_file_path }}"
    sysctl_file: /etc/sysctl.d/bridge-nf-call.conf
    reload: yes
  with_items:
    - net.bridge.bridge-nf-call-arptables
@@ -11,7 +11,6 @@
    state: directory
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"
    mode: 0700

- name: Configure sudo for deployment user
  copy:
contrib/metallb/README.md (new file, 12 lines)
@@ -0,0 +1,12 @@
# Deploy MetalLB into Kubespray/Kubernetes

> MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation. In short, it allows you to create Kubernetes services of type "LoadBalancer" in clusters that don't run on a cloud provider, and thus cannot simply hook into paid products to provide load-balancers.

This playbook aims to automate [this](https://metallb.universe.tf/concepts/layer2/). It deploys MetalLB into kubernetes and sets up a layer 2 loadbalancer.

## Install

Defaults can be found in contrib/metallb/roles/provision/defaults/main.yml. You can override the defaults by copying the contents of this file to somewhere in inventory/mycluster/group_vars such as inventory/mycluster/group_vars/k8s-cluster/addons.yml and making any adjustments as required.

```
ansible-playbook --ask-become -i inventory/sample/hosts.ini contrib/metallb/metallb.yml
```
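For example, a minimal override placed in `inventory/mycluster/group_vars/k8s-cluster/addons.yml` might look like the sketch below. The keys mirror `contrib/metallb/roles/provision/defaults/main.yml` shown later in this diff; the address range is only an example and must come from your own network:

```yaml
metallb:
  ip_range:
    - "192.0.2.100-192.0.2.120"   # example range, replace with addresses you control
  protocol: "layer2"
```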
contrib/metallb/library (symbolic link, 1 line)
@@ -0,0 +1 @@
../../library
contrib/metallb/metallb.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
---
- hosts: bastion[0]
  gather_facts: False
  roles:
    - { role: kubespray-defaults}
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- hosts: kube-master[0]
  tags:
    - "provision"
  roles:
    - { role: kubespray-defaults}
    - { role: provision }
contrib/metallb/roles/provision/defaults/main.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
---
metallb:
  ip_range:
    - "10.5.0.50-10.5.0.99"
  protocol: "layer2"
  # additional_address_pools:
  #   kube_service_pool:
  #     ip_range:
  #       - "10.5.1.50-10.5.1.99"
  #     protocol: "layer2"
  #     auto_assign: false
  limits:
    cpu: "100m"
    memory: "100Mi"
  port: "7472"
  version: v0.7.3
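Uncommented and filled in, the optional `additional_address_pools` block from these defaults would read roughly as follows (pool name and range are the sample values from the comments above; the remaining keys stay as in the defaults):

```yaml
metallb:
  ip_range:
    - "10.5.0.50-10.5.0.99"
  protocol: "layer2"
  additional_address_pools:
    kube_service_pool:            # sample pool name from the commented defaults
      ip_range:
        - "10.5.1.50-10.5.1.99"
      protocol: "layer2"
      auto_assign: false
```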
contrib/metallb/roles/provision/tasks/main.yml (new file, 40 lines)
@@ -0,0 +1,40 @@
---
- name: "Kubernetes Apps | Check cluster settings for MetalLB"
  fail:
    msg: "MetalLB require kube_proxy_strict_arp = true, see https://github.com/danderson/metallb/issues/153#issuecomment-518651132"
  when:
    - "kube_proxy_mode == 'ipvs' and not kube_proxy_strict_arp"

- name: Kubernetes Apps | Check AppArmor status
  command: which apparmor_parser
  register: apparmor_status
  when:
    - podsecuritypolicy_enabled
    - inventory_hostname == groups['kube-master'][0]
  failed_when: false

- name: Kubernetes Apps | Set apparmor_enabled
  set_fact:
    apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
  when:
    - podsecuritypolicy_enabled
    - inventory_hostname == groups['kube-master'][0]

- name: "Kubernetes Apps | Lay Down MetalLB"
  become: true
  template: { src: "{{ item }}.j2", dest: "{{ kube_config_dir }}/{{ item }}" }
  with_items: ["metallb.yml", "metallb-config.yml"]
  register: "rendering"
  when:
    - "inventory_hostname == groups['kube-master'][0]"

- name: "Kubernetes Apps | Install and configure MetalLB"
  kube:
    name: "MetalLB"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/{{ item.item }}"
    state: "{{ item.changed | ternary('latest','present') }}"
  become: true
  with_items: "{{ rendering.results }}"
  when:
    - "inventory_hostname == groups['kube-master'][0]"
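The first task above enforces a precondition rather than configuring anything: with kube-proxy in ipvs mode, MetalLB needs strict ARP. A sketch of the matching inventory setting, using the variable names from that check:

```yaml
kube_proxy_mode: ipvs
kube_proxy_strict_arp: true   # required by the MetalLB precondition check when kube-proxy runs in ipvs mode
```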
@@ -0,0 +1,25 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: loadbalanced
      protocol: {{ metallb.protocol }}
      addresses:
{% for ip_range in metallb.ip_range %}
      - {{ ip_range }}
{% endfor %}
{% if metallb.additional_address_pools is defined %}{% for pool in metallb.additional_address_pools %}
    - name: {{ pool }}
      protocol: {{ metallb.additional_address_pools[pool].protocol }}
      addresses:
{% for ip_range in metallb.additional_address_pools[pool].ip_range %}
      - {{ ip_range }}
{% endfor %}
      auto-assign: {{ metallb.additional_address_pools[pool].auto_assign }}
{% endfor %}
{% endif %}
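With the defaults from `roles/provision/defaults/main.yml` and no additional pools defined, this template should render to approximately the following (reconstructed by hand, so treat it as a sketch):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: loadbalanced
      protocol: layer2
      addresses:
      - 10.5.0.50-10.5.0.99
```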
contrib/metallb/roles/provision/templates/metallb.yml.j2 (new file, 263 lines)
@@ -0,0 +1,263 @@
apiVersion: v1
kind: Namespace
metadata:
  name: metallb-system
  labels:
    app: metallb
---

apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: metallb-system
  name: controller
  labels:
    app: metallb
---
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: metallb-system
  name: speaker
  labels:
    app: metallb

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metallb-system:controller
  labels:
    app: metallb
rules:
- apiGroups: [""]
  resources: ["services"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
  resources: ["services/status"]
  verbs: ["update"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metallb-system:speaker
  labels:
    app: metallb
rules:
- apiGroups: [""]
  resources: ["services", "endpoints", "nodes"]
  verbs: ["get", "list", "watch"]
{% if podsecuritypolicy_enabled %}
- apiGroups: ["policy"]
  resourceNames: ["metallb"]
  resources: ["podsecuritypolicies"]
  verbs: ["use"]
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: metallb
  annotations:
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
{% if apparmor_enabled %}
    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
    apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
{% endif %}
  labels:
    app: metallb
spec:
  privileged: true
  allowPrivilegeEscalation: false
  allowedCapabilities:
  - net_raw
  volumes:
  - secret
  hostNetwork: true
  hostPorts:
  - min: {{ metallb.port }}
    max: {{ metallb.port }}
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: true
{% endif %}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: metallb-system
  name: config-watcher
  labels:
    app: metallb
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create"]
---

## Role bindings
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metallb-system:controller
  labels:
    app: metallb
subjects:
- kind: ServiceAccount
  name: controller
  namespace: metallb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metallb-system:controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metallb-system:speaker
  labels:
    app: metallb
subjects:
- kind: ServiceAccount
  name: speaker
  namespace: metallb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metallb-system:speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  namespace: metallb-system
  name: config-watcher
  labels:
    app: metallb
subjects:
- kind: ServiceAccount
  name: controller
- kind: ServiceAccount
  name: speaker
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: config-watcher
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  namespace: metallb-system
  name: speaker
  labels:
    app: metallb
    component: speaker
spec:
  selector:
    matchLabels:
      app: metallb
      component: speaker
  template:
    metadata:
      labels:
        app: metallb
        component: speaker
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "{{ metallb.port }}"
    spec:
      serviceAccountName: speaker
      terminationGracePeriodSeconds: 0
      hostNetwork: true
      containers:
      - name: speaker
        image: metallb/speaker:{{ metallb.version }}
        imagePullPolicy: IfNotPresent
        args:
        - --port={{ metallb.port }}
        - --config=config
        env:
        - name: METALLB_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        ports:
        - name: monitoring
          containerPort: {{ metallb.port }}
        resources:
          limits:
            cpu: {{ metallb.limits.cpu }}
            memory: {{ metallb.limits.memory }}
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          capabilities:
            drop:
            - all
            add:
            - net_raw

---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: metallb-system
  name: controller
  labels:
    app: metallb
    component: controller
spec:
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app: metallb
      component: controller
  template:
    metadata:
      labels:
        app: metallb
        component: controller
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "{{ metallb.port }}"
    spec:
      serviceAccountName: controller
      terminationGracePeriodSeconds: 0
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534  # nobody
      containers:
      - name: controller
        image: metallb/controller:{{ metallb.version }}
        imagePullPolicy: IfNotPresent
        args:
        - --port={{ metallb.port }}
        - --config=config
        ports:
        - name: monitoring
          containerPort: {{ metallb.port }}
        resources:
          limits:
            cpu: {{ metallb.limits.cpu }}
            memory: {{ metallb.limits.memory }}
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - all
          readOnlyRootFilesystem: true

---
@@ -8,19 +8,19 @@ In the same directory of this ReadMe file you should find a file named `inventor

Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings on `inventory/sample/group_vars/all.yml` make sense with your deployment. Then change to the kubespray root folder, and execute (supposing that the machines are all using ubuntu):

```shell
```
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
```

This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:

```shell
```
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
```

If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:

```shell
```
k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core
k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core
k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core

@@ -30,7 +30,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us

First step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:

```ini
```
cluster_name = "cluster1"
number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2"

@@ -39,7 +39,7 @@ number_of_k8s_nodes = "0"
public_key_path = "~/.ssh/my-desired-key.pub"
image = "Ubuntu 16.04"
ssh_user = "ubuntu"
flavor_k8s_node = "node-flavor-id-in-your-openstack"
flavor_k8s_node = "node-flavor-id-in-your-openstack"
flavor_k8s_master = "master-flavor-id-in-your-openstack"
network_name = "k8s-network"
floatingip_pool = "net_external"

@@ -54,7 +54,7 @@ ssh_user_gfs = "ubuntu"

As explained in the general terraform/openstack guide, you need to source your OpenStack credentials file, add your ssh-key to the ssh-agent and setup environment variables for terraform:

```shell
```
$ source ~/.stackrc
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/my-desired-key

@@ -67,7 +67,7 @@ $ echo Setting up Terraform creds && \

Then, standing on the kubespray directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:

```shell
```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```

@@ -75,18 +75,18 @@ This will create both your Kubernetes and Gluster VMs. Make sure that the ansibl

Then, provision your Kubernetes (kubespray) cluster with the following ansible call:

```shell
```
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
```

Finally, provision the glusterfs nodes and add the Persistent Volume setup for GlusterFS in Kubernetes through the following ansible call:

```shell
```
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
```

If you need to destroy the cluster, you can run:

```shell
```
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```
@@ -15,10 +15,10 @@
  roles:
    - { role: glusterfs/server }

- hosts: k8s_cluster
- hosts: k8s-cluster
  roles:
    - { role: glusterfs/client }

- hosts: kube_control_plane[0]
- hosts: kube-master[0]
  roles:
    - { role: kubernetes-pv }
@@ -11,10 +11,10 @@
# ## Set disk_volume_device_1 to desired device for gluster brick, if different to /dev/vdb (default).
# ## As in the previous case, you can set ip to give direct communication on internal IPs
# gfs_node1 ansible_ssh_host=95.54.0.18  # disk_volume_device_1=/dev/vdc ip=10.3.0.7
# gfs_node2 ansible_ssh_host=95.54.0.19  # disk_volume_device_1=/dev/vdc ip=10.3.0.8
# gfs_node3 ansible_ssh_host=95.54.0.20  # disk_volume_device_1=/dev/vdc ip=10.3.0.9
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9

# [kube_control_plane]
# [kube-master]
# node1
# node2

@@ -23,16 +23,16 @@
# node2
# node3

# [kube_node]
# [kube-node]
# node2
# node3
# node4
# node5
# node6

# [k8s_cluster:children]
# kube_node
# kube_control_plane
# [k8s-cluster:children]
# kube-node
# kube-master

# [gfs-cluster]
# gfs_node1
@@ -8,22 +8,18 @@ Installs and configures GlusterFS on Linux.

For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (that port, plus an additional incremented port for each additional server in the cluster; the latter if GlusterFS is version 3.4+), and TCP/UDP port `111` must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role).

This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/ansible/latest/collections/gluster/gluster/gluster_volume_module.html) module to ease the management of Gluster volumes.
This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/gluster_volume_module.html) module to ease the management of Gluster volumes.

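A minimal sketch of such a play, using the `gluster_volume` module mentioned above; the volume name, brick path, and host group are illustrative, not part of this role:

```yaml
- hosts: gluster
  become: true
  tasks:
    - name: Create and start a replicated volume across the group
      gluster_volume:
        state: present
        name: gluster-vol1             # illustrative volume name
        bricks: /srv/gluster/brick     # illustrative brick directory on each node
        replicas: 2
        cluster: "{{ groups['gluster'] }}"
      run_once: true                   # the module manages the whole cluster from one host
```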
## Role Variables

Available variables are listed below, along with default values (see `defaults/main.yml`):

```yaml
glusterfs_default_release: ""
```
    glusterfs_default_release: ""

You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).

```yaml
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.5"
```
    glusterfs_ppa_use: yes
    glusterfs_ppa_version: "3.5"

For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.

@@ -33,11 +29,9 @@ None.

## Example Playbook

```yaml
- hosts: server
  roles:
    - geerlingguy.glusterfs
```

For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).
@@ -7,7 +7,7 @@
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use

- name: Ensure GlusterFS client will reinstall if the PPA was just added.  # noqa 503
- name: Ensure GlusterFS client will reinstall if the PPA was just added.
  apt:
    name: "{{ item }}"
    state: absent

@@ -18,7 +18,7 @@
- name: Ensure GlusterFS client is installed.
  apt:
    name: "{{ item }}"
    state: present
    state: installed
    default_release: "{{ glusterfs_default_release }}"
  with_items:
    - glusterfs-client
@@ -1,10 +1,10 @@
---
- name: Install Prerequisites
  package: name={{ item }} state=present
  yum: name={{ item }} state=present
  with_items:
    - "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
  package: name={{ item }} state=present
  yum: name={{ item }} state=present
  with_items:
    - glusterfs-client
@@ -9,7 +9,7 @@
  when: ansible_os_family == "Debian"

- name: install xfs RedHat
  package: name=xfsprogs state=present
  yum: name=xfsprogs state=present
  when: ansible_os_family == "RedHat"

# Format external volumes in xfs

@@ -82,7 +82,6 @@
  template:
    dest: "{{ gluster_mount_dir }}/.test-file.txt"
    src: test-file.txt
    mode: 0644
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Unmount glusterfs
@@ -7,7 +7,7 @@
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use

- name: Ensure GlusterFS will reinstall if the PPA was just added.  # noqa 503
- name: Ensure GlusterFS will reinstall if the PPA was just added.
  apt:
    name: "{{ item }}"
    state: absent

@@ -19,7 +19,7 @@
- name: Ensure GlusterFS is installed.
  apt:
    name: "{{ item }}"
    state: present
    state: installed
    default_release: "{{ glusterfs_default_release }}"
  with_items:
    - glusterfs-server
@@ -1,11 +1,11 @@
---
- name: Install Prerequisites
  package: name={{ item }} state=present
  yum: name={{ item }} state=present
  with_items:
    - "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
  package: name={{ item }} state=present
  yum: name={{ item }} state=present
  with_items:
    - glusterfs-server
    - glusterfs-client
@@ -1,4 +1,5 @@
---
- hosts: all

  roles:
    - { role: prepare }
    - role_under_test
@@ -3,13 +3,12 @@
  template:
    src: "{{ item.file }}"
    dest: "{{ kube_config_dir }}/{{ item.dest }}"
    mode: 0644
  with_items:
    - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
    - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
    - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
  register: gluster_pv
  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined

- name: Kubernetes Apps | Set GlusterFS endpoint and PV
  kube:

@@ -20,4 +19,4 @@
    filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
    state: "{{ item.changed | ternary('latest','present') }}"
  with_items: "{{ gluster_pv.results }}"
  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
@@ -8,7 +8,7 @@
{% for host in groups['gfs-cluster'] %}
  {
    "addresses": [
      {
      {
        "ip": "{{hostvars[host]['ip']|default(hostvars[host].ansible_default_ipv4['address'])}}"
      }
    ],
@@ -1,7 +1,7 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: glusterfs
  name: glusterfs
spec:
  capacity:
    storage: "{{ hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb }}Gi"
@@ -1,26 +1,17 @@
# Deploy Heketi/Glusterfs into Kubespray/Kubernetes

This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys heketi/glusterfs into kubernetes and sets up a storageclass.

## Important notice

> Due to resource limits on the current project maintainers and general lack of contributions we are considering placing Heketi into a [near-maintenance mode](https://github.com/heketi/heketi#important-notice)

## Client Setup

Heketi provides a CLI that provides users with a means to administer the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine.

## Install

Copy the inventory.yml.sample over to inventory/sample/k8s_heketi_inventory.yml and change it according to your setup.

```shell
```
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
```

## Tear down

```shell
```
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
```
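A trimmed sketch of what that inventory can look like, based on the `inventory.yml.sample` changes shown further down in this diff (node names and keys are the sample values, not recommendations):

```yaml
all:
  vars:
    heketi_admin_key: "11elfeinhundertundelf"   # sample key from inventory.yml.sample
    heketi_user_key: "!!einseinseins"           # sample key from inventory.yml.sample
  children:
    k8s-cluster:
      children:
        kube-master:
          hosts:
            node1:
        etcd:
          hosts:
            node2:
        kube-node:
          hosts:
            node1:
            node2:
```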
@@ -1,5 +1,5 @@
---
- hosts: kube_control_plane[0]
- hosts: kube-master[0]
  roles:
    - { role: tear-down }
@@ -3,7 +3,7 @@
  roles:
    - { role: prepare }

- hosts: kube_control_plane[0]
- hosts: kube-master[0]
  tags:
    - "provision"
  roles:
@@ -2,25 +2,18 @@ all:
  vars:
    heketi_admin_key: "11elfeinhundertundelf"
    heketi_user_key: "!!einseinseins"
    glusterfs_daemonset:
      readiness_probe:
        timeout_seconds: 3
        initial_delay_seconds: 3
      liveness_probe:
        timeout_seconds: 3
        initial_delay_seconds: 10
  children:
    k8s_cluster:
    k8s-cluster:
      vars:
        kubelet_fail_swap_on: false
      children:
        kube_control_plane:
        kube-master:
          hosts:
            node1:
        etcd:
          hosts:
            node2:
        kube_node:
        kube-node:
          hosts: &kube_nodes
            node1:
            node2:
@@ -11,7 +11,7 @@

- name: "Install glusterfs mount utils (RedHat)"
  become: true
  package:
  yum:
    name: "glusterfs-fuse"
    state: "present"
  when: "ansible_os_family == 'RedHat'"
@@ -1,10 +1,7 @@
---
- name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
  become: true
  template:
    src: "heketi-bootstrap.json.j2"
    dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
    mode: 0640
  template: { src: "heketi-bootstrap.json.j2", dest: "{{ kube_config_dir }}/heketi-bootstrap.json" }
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
  kube:
@@ -6,7 +6,7 @@
- name: "Delete bootstrap Heketi."
  command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
  when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
- name: "Ensure there is nothing left over."  # noqa 301
- name: "Ensure there is nothing left over."
  command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
  register: "heketi_result"
  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
@@ -10,11 +10,10 @@
  template:
    src: "topology.json.j2"
    dest: "{{ kube_config_dir }}/topology.json"
    mode: 0644
- name: "Copy topology configuration into container."
  changed_when: false
  command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology."  # noqa 503
- name: "Load heketi topology."
  when: "render.changed"
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
  register: "load_heketi"
@@ -18,7 +18,7 @@
- name: "Provision database volume."
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
  when: "heketi_database_volume_exists is undefined"
- name: "Copy configuration from pod."  # noqa 301
- name: "Copy configuration from pod."
  become: true
  command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
- name: "Get heketi volume ids."
@@ -1,9 +1,6 @@
---
- name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
  template:
    src: "glusterfs-daemonset.json.j2"
    dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
    mode: 0644
  template: { src: "glusterfs-daemonset.json.j2", dest: "{{ kube_config_dir }}/glusterfs-daemonset.json" }
  become: true
  register: "rendering"
- name: "Kubernetes Apps | Install and configure GlusterFS daemonset"

@@ -30,10 +27,7 @@
  delay: 5

- name: "Kubernetes Apps | Lay Down Heketi Service Account"
  template:
    src: "heketi-service-account.json.j2"
    dest: "{{ kube_config_dir }}/heketi-service-account.json"
    mode: 0644
  template: { src: "heketi-service-account.json.j2", dest: "{{ kube_config_dir }}/heketi-service-account.json" }
  become: true
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Service Account"

@@ -4,7 +4,6 @@
  template:
    src: "heketi-deployment.json.j2"
    dest: "{{ kube_config_dir }}/heketi-deployment.json"
    mode: 0644
  register: "rendering"

- name: "Kubernetes Apps | Install and configure Heketi"
@@ -5,7 +5,7 @@
  changed_when: false

- name: "Kubernetes Apps | Deploy cluster role binding."
  when: "clusterrolebinding_state.stdout | length == 0"
  when: "clusterrolebinding_state.stdout == \"\""
  command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"

- name: Get clusterrolebindings again

@@ -15,7 +15,7 @@

- name: Make sure that clusterrolebindings are present now
  assert:
    that: "clusterrolebinding_state.stdout | length > 0"
    that: "clusterrolebinding_state.stdout != \"\""
    msg: "Cluster role binding is not present."

- name: Get the heketi-config-secret secret

@@ -28,10 +28,9 @@
  template:
    src: "heketi.json.j2"
    dest: "{{ kube_config_dir }}/heketi.json"
    mode: 0644

- name: "Deploy Heketi config secret"
  when: "secret_state.stdout | length == 0"
  when: "secret_state.stdout == \"\""
  command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"

- name: Get the heketi-config-secret secret again

@@ -41,5 +40,5 @@

- name: Make sure the heketi-config-secret secret exists now
  assert:
    that: "secret_state.stdout | length > 0"
    that: "secret_state.stdout != \"\""
    msg: "Heketi config secret is not present."
@@ -2,10 +2,7 @@
- name: "Kubernetes Apps | Lay Down Heketi Storage"
  become: true
  vars: { nodes: "{{ groups['heketi-node'] }}" }
  template:
    src: "heketi-storage.json.j2"
    dest: "{{ kube_config_dir }}/heketi-storage.json"
    mode: 0644
  template: { src: "heketi-storage.json.j2", dest: "{{ kube_config_dir }}/heketi-storage.json" }
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Storage"
  kube:

@@ -16,7 +16,6 @@
  template:
    src: "storageclass.yml.j2"
    dest: "{{ kube_config_dir }}/storageclass.yml"
    mode: 0644
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Storace Class"
  kube:

@@ -10,11 +10,10 @@
  template:
    src: "topology.json.j2"
    dest: "{{ kube_config_dir }}/topology.json"
    mode: 0644
- name: "Copy topology configuration into container."  # noqa 503
- name: "Copy topology configuration into container."
  when: "rendering.changed"
  command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology."  # noqa 503
- name: "Load heketi topology."
  when: "rendering.changed"
  command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
- name: "Get heketi topology."
@@ -73,8 +73,8 @@
            "privileged": true
          },
          "readinessProbe": {
            "timeoutSeconds": {{ glusterfs_daemonset.readiness_probe.timeout_seconds }},
            "initialDelaySeconds": {{ glusterfs_daemonset.readiness_probe.initial_delay_seconds }},
            "timeoutSeconds": 3,
            "initialDelaySeconds": 3,
            "exec": {
              "command": [
                "/bin/bash",

@@ -84,8 +84,8 @@
            }
          },
          "livenessProbe": {
            "timeoutSeconds": {{ glusterfs_daemonset.liveness_probe.timeout_seconds }},
            "initialDelaySeconds": {{ glusterfs_daemonset.liveness_probe.initial_delay_seconds }},
            "timeoutSeconds": 3,
            "initialDelaySeconds": 10,
            "exec": {
              "command": [
                "/bin/bash",
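On the branch where these probe values are templated, they come from the `glusterfs_daemonset` variable defined in the sample heketi inventory earlier in this diff; overriding them in inventory vars looks like:

```yaml
glusterfs_daemonset:
  readiness_probe:
    timeout_seconds: 3
    initial_delay_seconds: 3
  liveness_probe:
    timeout_seconds: 3
    initial_delay_seconds: 10
```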
@@ -1,7 +1,7 @@
---
- name: "Install lvm utils (RedHat)"
  become: true
  package:
  yum:
    name: "lvm2"
    state: "present"
  when: "ansible_os_family == 'RedHat'"

@@ -19,10 +19,10 @@
  become: true
  shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
  register: "volume_groups"
  ignore_errors: true  # noqa ignore-errors
  ignore_errors: true
  changed_when: false

- name: "Remove volume groups."  # noqa 301
- name: "Remove volume groups."
  environment:
    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH / CentOS conservative path management
  become: true

@@ -30,16 +30,16 @@
  with_items: "{{ volume_groups.stdout_lines }}"
  loop_control: { loop_var: "volume_group" }

- name: "Remove physical volume from cluster disks."  # noqa 301
- name: "Remove physical volume from cluster disks."
  environment:
    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH / CentOS conservative path management
  become: true
  command: "pvremove {{ disk_volume_device_1 }} --yes"
  ignore_errors: true  # noqa ignore-errors
  ignore_errors: true

- name: "Remove lvm utils (RedHat)"
  become: true
  package:
  yum:
    name: "lvm2"
    state: "absent"
  when: "ansible_os_family == 'RedHat' and heketi_remove_lvm"
@@ -1,51 +1,51 @@
 ---
-- name: Remove storage class. # noqa 301
+- name: "Remove storage class."
   command: "{{ bin_dir }}/kubectl delete storageclass gluster"
-  ignore_errors: true # noqa ignore-errors
-- name: Tear down heketi. # noqa 301
+  ignore_errors: true
+- name: "Tear down heketi."
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
-  ignore_errors: true # noqa ignore-errors
-- name: Tear down heketi. # noqa 301
+  ignore_errors: true
+- name: "Tear down heketi."
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
-  ignore_errors: true # noqa ignore-errors
-- name: Tear down bootstrap.
-  include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
-- name: Ensure there is nothing left over. # noqa 301
+  ignore_errors: true
+- name: "Tear down bootstrap."
+  include_tasks: "../provision/tasks/bootstrap/tear-down.yml"
+- name: "Ensure there is nothing left over."
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: Ensure there is nothing left over. # noqa 301
+- name: "Ensure there is nothing left over."
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: Tear down glusterfs. # noqa 301
+- name: "Tear down glusterfs."
   command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
-  ignore_errors: true # noqa ignore-errors
-- name: Remove heketi storage service. # noqa 301
+  ignore_errors: true
+- name: "Remove heketi storage service."
   command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
-  ignore_errors: true # noqa ignore-errors
-- name: Remove heketi gluster role binding # noqa 301
+  ignore_errors: true
+- name: "Remove heketi gluster role binding"
   command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
-  ignore_errors: true # noqa ignore-errors
-- name: Remove heketi config secret # noqa 301
+  ignore_errors: true
+- name: "Remove heketi config secret"
   command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
-  ignore_errors: true # noqa ignore-errors
-- name: Remove heketi db backup # noqa 301
+  ignore_errors: true
+- name: "Remove heketi db backup"
   command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
-  ignore_errors: true # noqa ignore-errors
-- name: Remove heketi service account # noqa 301
+  ignore_errors: true
+- name: "Remove heketi service account"
   command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
-  ignore_errors: true # noqa ignore-errors
-- name: Get secrets
+  ignore_errors: true
+- name: "Get secrets"
   command: "{{ bin_dir }}/kubectl get secrets --output=\"json\""
   register: "secrets"
   changed_when: false
-- name: Remove heketi storage secret
+- name: "Remove heketi storage secret"
   vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
   command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
   when: "storage_query is defined"
-  ignore_errors: true # noqa ignore-errors
+  ignore_errors: true
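The two "Ensure there is nothing left over" tasks poll with `until`/`retries`/`delay` until the selector returns an empty item list. The shell equivalent looks like the sketch below; `jq` is an assumption here, since the play itself uses the `json_query` filter:

```shell
# Poll up to 60 times, 5s apart, until no heketi-pod resources remain.
for i in $(seq 1 60); do
  left=$(kubectl get all,service,jobs,deployment,secret \
    --selector="glusterfs=heketi-pod" -o json | jq '.items | length')
  [ "${left}" -eq 0 ] && break
  sleep 5
done
```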
@@ -1,65 +0,0 @@
# Offline deployment

## manage-offline-container-images.sh

Container image collecting script for offline deployment.

This script has two features:
(1) Get container images from an environment which is deployed online.
(2) Deploy a local container registry and register the container images to the registry.

Step (1) should be done at an online site as preparation; we then bring the collected images
to the target offline environment. If images are from a private registry,
you need to set the `PRIVATE_REGISTRY` environment variable.
We then run step (2) to register the images to the local registry.

Step (1) can be run with:

```shell
manage-offline-container-images.sh create
```

Step (2) can be run with:

```shell
manage-offline-container-images.sh register
```
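A usage sketch for the private-registry case; the registry address below is a hypothetical placeholder:

```shell
# Online host: collect images; PRIVATE_REGISTRY strips that prefix so
# Kubespray's *_image_repo variables can be substituted later.
PRIVATE_REGISTRY=myregistry.example.com:5000 \
  ./manage-offline-container-images.sh create

# Offline host: load the archive and push everything to $(hostname):5000.
./manage-offline-container-images.sh register
```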
## generate_list.sh

This script generates the list of downloaded files and the list of container images from the `roles/download/defaults/main.yml` file.

Running this script executes the `generate_list.yml` playbook in the kubespray root directory and generates four files:
all downloaded file URLs in files.list, all container images in images.list, and the jinja2 templates in *.template.

```shell
./generate_list.sh
tree temp
temp
├── files.list
├── files.list.template
├── images.list
└── images.list.template
0 directories, 4 files
```

In some cases you may want to update a component version; declare the version variables in an ansible inventory file or group_vars,
then run `./generate_list.sh -i [inventory_file]` to update files.list and images.list.
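Because the script forwards its arguments straight to `ansible-playbook` (see generate_list.sh below), `-e` extra-vars work as well as inventory files. A sketch; the version value and inventory path are illustrative:

```shell
# Pin a component version via extra-vars.
./generate_list.sh -e kube_version=v1.21.5

# Or pull versions from a specific inventory's group_vars.
./generate_list.sh -i inventory/mycluster/hosts.yaml
```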
## manage-offline-files.sh

This script downloads all files according to `temp/files.list` and runs an nginx container to provide offline file download.

Step (1): generate `files.list`

```shell
./generate_list.sh
```

Step (2): download the files and run the nginx container

```shell
./manage-offline-files.sh
```

When the nginx container is running, it can be accessed at <http://127.0.0.1:8080/>.
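Since the files are mirrored with `wget -x`, they land under host/path directories that mirror their original URLs. A fetch sketch; the file path below is a hypothetical example of that layout:

```shell
# Browse the mirrored tree via nginx's autoindex.
curl http://127.0.0.1:8080/

# Fetch one mirrored file; the path mirrors its original download URL.
curl -O http://127.0.0.1:8080/github.com/containernetworking/plugins/releases/download/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
```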
@@ -1 +0,0 @@
{ "insecure-registries":["HOSTNAME:5000"] }
@@ -1,33 +0,0 @@
#!/bin/bash
set -eo pipefail

CURRENT_DIR=$(cd $(dirname $0); pwd)
TEMP_DIR="${CURRENT_DIR}/temp"
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"

: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}

mkdir -p ${TEMP_DIR}

# generate the template holding all download file urls
grep 'download_url:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
    | sed 's/^.*_url: //g;s/\"//g' > ${TEMP_DIR}/files.list.template

# generate the template holding all container images
sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
    | sed -n "s/repo: //p;s/tag: //p" | tr -d ' ' \
    | sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template

# add the kube-* images to the images list template
# These container images are pulled by kubeadm, so roles/download/defaults/main.yml
# doesn't contain them; that is why they have to be appended to the list separately.
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
for i in $KUBE_IMAGES; do
    echo "{{ kube_image_repo }}/$i:{{ kube_version }}" >> ${TEMP_DIR}/images.list.template
done

# run ansible to expand the templates
/bin/cp ${CURRENT_DIR}/generate_list.yml ${REPO_ROOT_DIR}

(cd ${REPO_ROOT_DIR} && ansible-playbook $* generate_list.yml && /bin/rm generate_list.yml) || exit 1
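The repo/tag pairing done by the second pipeline is easy to sanity-check in isolation; the input below is a made-up example, not real download data:

```shell
# Feed a fake repo/tag pair through the same pipeline the script uses.
printf 'repo: docker.io/library/nginx\ntag: 1.21\n' \
  | sed -n "s/repo: //p;s/tag: //p" | tr -d ' ' \
  | sed 'N;s#\n# #g' | tr ' ' ':'
# -> docker.io/library/nginx:1.21
```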
@@ -1,19 +0,0 @@
---
- hosts: localhost
  become: no

  roles:
    # Just load default variables from roles.
    - role: kubespray-defaults
      when: false
    - role: download
      when: false

  tasks:
    # Generate files.list and images.list files from templates.
    - template:
        src: ./contrib/offline/temp/{{ item }}.list.template
        dest: ./contrib/offline/temp/{{ item }}.list
      with_items:
        - files
        - images
@@ -1,172 +0,0 @@
#!/bin/bash

OPTION=$1
CURRENT_DIR=$(cd $(dirname $0); pwd)
TEMP_DIR="${CURRENT_DIR}/temp"

IMAGE_TAR_FILE="${CURRENT_DIR}/container-images.tar.gz"
IMAGE_DIR="${CURRENT_DIR}/container-images"
IMAGE_LIST="${IMAGE_DIR}/container-images.txt"
RETRY_COUNT=5

function create_container_image_tar() {
    set -e

    IMAGES=$(kubectl describe pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq)
    # NOTE: etcd and pause cannot be seen as pods.
    # The pause image is used for the --pod-infra-container-image option of kubelet.
    EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|registry.k8s.io/pause:" | sed s@\"@@g)
    IMAGES="${IMAGES} ${EXT_IMAGES}"

    rm -f ${IMAGE_TAR_FILE}
    rm -rf ${IMAGE_DIR}
    mkdir ${IMAGE_DIR}
    cd ${IMAGE_DIR}

    sudo docker pull registry:latest
    sudo docker save -o registry-latest.tar registry:latest

    for image in ${IMAGES}
    do
        FILE_NAME="$(echo ${image} | sed s@"/"@"-"@g | sed s/":"/"-"/g)".tar
        set +e
        for step in $(seq 1 ${RETRY_COUNT})
        do
            sudo docker pull ${image}
            if [ $? -eq 0 ]; then
                break
            fi
            echo "Failed to pull ${image} at step ${step}"
            if [ ${step} -eq ${RETRY_COUNT} ]; then
                exit 1
            fi
        done
        set -e
        sudo docker save -o ${FILE_NAME} ${image}

        # NOTE: Here removes the following repo parts from each image
        # so that these parts will be replaced with Kubespray.
        # - kube_image_repo: "registry.k8s.io"
        # - gcr_image_repo: "gcr.io"
        # - docker_image_repo: "docker.io"
        # - quay_image_repo: "quay.io"
        FIRST_PART=$(echo ${image} | awk -F"/" '{print $1}')
        if [ "${FIRST_PART}" = "registry.k8s.io" ] ||
           [ "${FIRST_PART}" = "gcr.io" ] ||
           [ "${FIRST_PART}" = "docker.io" ] ||
           [ "${FIRST_PART}" = "quay.io" ] ||
           [ "${FIRST_PART}" = "${PRIVATE_REGISTRY}" ]; then
            image=$(echo ${image} | sed s@"${FIRST_PART}/"@@)
        fi
        echo "${FILE_NAME} ${image}" >> ${IMAGE_LIST}
    done

    cd ..
    sudo chown ${USER} ${IMAGE_DIR}/*
    tar -zcvf ${IMAGE_TAR_FILE} ./container-images
    rm -rf ${IMAGE_DIR}

    echo ""
    echo "${IMAGE_TAR_FILE} is created to contain your container images."
    echo "Please keep this file and bring it to your offline environment."
}

function register_container_images() {
    if [ ! -f ${IMAGE_TAR_FILE} ]; then
        echo "${IMAGE_TAR_FILE} should exist."
        exit 1
    fi
    if [ ! -d ${TEMP_DIR} ]; then
        mkdir ${TEMP_DIR}
    fi

    # To avoid "http: server gave http response to https client" errors.
    LOCALHOST_NAME=$(hostname)
    if [ -d /etc/docker/ ]; then
        set -e
        # Ubuntu 18.04, RHEL7/CentOS7
        cp ${CURRENT_DIR}/docker-daemon.json ${TEMP_DIR}/docker-daemon.json
        sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/docker-daemon.json
        sudo cp ${TEMP_DIR}/docker-daemon.json /etc/docker/daemon.json
    elif [ -d /etc/containers/ ]; then
        set -e
        # RHEL8/CentOS8
        cp ${CURRENT_DIR}/registries.conf ${TEMP_DIR}/registries.conf
        sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/registries.conf
        sudo cp ${TEMP_DIR}/registries.conf /etc/containers/registries.conf
    else
        echo "A docker package (docker-ce, etc.) should be installed."
        exit 1
    fi

    tar -zxvf ${IMAGE_TAR_FILE}
    sudo docker load -i ${IMAGE_DIR}/registry-latest.tar
    set +e
    sudo docker container inspect registry >/dev/null 2>&1
    if [ $? -ne 0 ]; then
        sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
    fi
    set -e

    while read -r line; do
        file_name=$(echo ${line} | awk '{print $1}')
        raw_image=$(echo ${line} | awk '{print $2}')
        new_image="${LOCALHOST_NAME}:5000/${raw_image}"
        org_image=$(sudo docker load -i ${IMAGE_DIR}/${file_name} | head -n1 | awk '{print $3}')
        image_id=$(sudo docker image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}' | sed s/'\",'//)
        if [ -z "${file_name}" ]; then
            echo "Failed to get file_name for line ${line}"
            exit 1
        fi
        if [ -z "${raw_image}" ]; then
            echo "Failed to get raw_image for line ${line}"
            exit 1
        fi
        if [ -z "${org_image}" ]; then
            echo "Failed to get org_image for line ${line}"
            exit 1
        fi
        if [ -z "${image_id}" ]; then
            echo "Failed to get image_id for file ${file_name}"
            exit 1
        fi
        sudo docker load -i ${IMAGE_DIR}/${file_name}
        sudo docker tag ${image_id} ${new_image}
        sudo docker push ${new_image}
    done <<< "$(cat ${IMAGE_LIST})"

    echo "Succeeded in registering container images to the local registry."
    echo "Please specify ${LOCALHOST_NAME}:5000 for the following options in your inventory:"
    echo "- kube_image_repo"
    echo "- gcr_image_repo"
    echo "- docker_image_repo"
    echo "- quay_image_repo"
}

if [ "${OPTION}" == "create" ]; then
    create_container_image_tar
elif [ "${OPTION}" == "register" ]; then
    register_container_images
else
    echo "This script has two features:"
    echo "(1) Get container images from an environment which is deployed online."
    echo "(2) Deploy a local container registry and register the container images to the registry."
    echo ""
    echo "Step (1) should be done at an online site as preparation; then we bring"
    echo "the collected images to the target offline environment. If images are from"
    echo "a private registry, you need to set the PRIVATE_REGISTRY environment variable."
    echo "Then we run step (2) to register the images to the local registry."
    echo ""
    echo "${IMAGE_TAR_FILE} is created to contain your container images."
    echo "Please keep this file and bring it to your offline environment."
    echo ""
    echo "Step (1) can be run with:"
    echo " $ ./manage-offline-container-images.sh create"
    echo ""
    echo "Step (2) can be run with:"
    echo " $ ./manage-offline-container-images.sh register"
    echo ""
    echo "Please specify 'create' or 'register'."
    echo ""
    exit 1
fi
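Once `register` finishes, the standard Docker Registry HTTP API v2 can confirm what landed in the local registry; the image name in the second call is a hypothetical example:

```shell
# List repositories the script pushed into the local registry.
curl "http://$(hostname):5000/v2/_catalog"

# Inspect available tags for one of them (image name is illustrative).
curl "http://$(hostname):5000/v2/kube-apiserver/tags/list"
```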
@@ -1,44 +0,0 @@
#!/bin/bash

CURRENT_DIR=$( dirname "$(readlink -f "$0")" )
OFFLINE_FILES_DIR_NAME="offline-files"
OFFLINE_FILES_DIR="${CURRENT_DIR}/${OFFLINE_FILES_DIR_NAME}"
OFFLINE_FILES_ARCHIVE="${CURRENT_DIR}/offline-files.tar.gz"
FILES_LIST=${FILES_LIST:-"${CURRENT_DIR}/temp/files.list"}
NGINX_PORT=8080

# download files
if [ ! -f "${FILES_LIST}" ]; then
    echo "${FILES_LIST} should exist, run ./generate_list.sh first."
    exit 1
fi

rm -rf "${OFFLINE_FILES_DIR}"
rm -f "${OFFLINE_FILES_ARCHIVE}"  # -f: the archive does not exist on a first run
mkdir "${OFFLINE_FILES_DIR}"

wget -x -P "${OFFLINE_FILES_DIR}" -i "${FILES_LIST}"
tar -czvf "${OFFLINE_FILES_ARCHIVE}" "${OFFLINE_FILES_DIR_NAME}"

[ -n "$NO_HTTP_SERVER" ] && echo "skip to run nginx" && exit 0

# run nginx container server
if command -v nerdctl 1>/dev/null 2>&1; then
    runtime="nerdctl"
elif command -v podman 1>/dev/null 2>&1; then
    runtime="podman"
elif command -v docker 1>/dev/null 2>&1; then
    runtime="docker"
else
    echo "No supported container runtime found"
    exit 1
fi

sudo "${runtime}" container inspect nginx >/dev/null 2>&1
if [ $? -ne 0 ]; then
    sudo "${runtime}" run \
        --restart=always -d -p ${NGINX_PORT}:80 \
        --volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \
        --volume "$(pwd)"/nginx.conf:/etc/nginx/nginx.conf \
        --name nginx nginx:alpine
fi
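One behavior worth calling out: the `NO_HTTP_SERVER` guard in this script lets you produce the download archive without starting nginx, for example when the archive will be served from another host:

```shell
# Download and archive only; skip starting the nginx container.
NO_HTTP_SERVER=1 ./manage-offline-files.sh
```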
@@ -1,39 +0,0 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
include /usr/share/nginx/modules/*.conf;
events {
    worker_connections 1024;
}
http {
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    default_type application/octet-stream;
    include /etc/nginx/conf.d/*.conf;
    server {
        listen 80 default_server;
        listen [::]:80 default_server;
        server_name _;
        include /etc/nginx/default.d/*.conf;
        location / {
            root /usr/share/nginx/html/download;
            autoindex on;
            autoindex_exact_size off;
            autoindex_localtime on;
        }
        error_page 404 /404.html;
        location = /40x.html {
        }
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
        }
    }
}
@@ -1,8 +0,0 @@
[registries.search]
registries = ['registry.access.redhat.com', 'registry.redhat.io', 'docker.io']

[registries.insecure]
registries = ['HOSTNAME:5000']

[registries.block]
registries = []
@@ -1,2 +0,0 @@
---
disable_service_firewall: false
@@ -1,23 +0,0 @@
---
- block:
    - name: List services
      service_facts:

    - name: Disable service firewalld
      systemd:
        name: firewalld
        state: stopped
        enabled: no
      when:
        "'firewalld.service' in services"

    - name: Disable service ufw
      systemd:
        name: ufw
        state: stopped
        enabled: no
      when:
        "'ufw.service' in services"
  when:
    - disable_service_firewall is defined and disable_service_firewall
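These tasks only stop and disable firewalld/ufw when `disable_service_firewall` is set; the effect can be verified per node with systemctl:

```shell
# Confirm both firewall services are stopped and disabled; hosts that never
# had the package installed report the unit as not-found, which is fine.
for svc in firewalld ufw; do
  echo "${svc}: $(systemctl is-active ${svc}) / $(systemctl is-enabled ${svc})"
done
```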
@@ -9,8 +9,8 @@ Summary: Ansible modules for installing Kubernetes
 
 Group: System Environment/Libraries
 License: ASL 2.0
-Url: https://github.com/kubernetes-sigs/kubespray
-Source0: https://github.com/kubernetes-sigs/kubespray/archive/%{upstream_version}.tar.gz#/%{name}-%{release}.tar.gz
+Url: https://github.com/kubernetes-incubator/kubespray
+Source0: https://github.com/kubernetes-incubator/kubespray/archive/%{upstream_version}.tar.gz#/%{name}-%{release}.tar.gz
 
 BuildArch: noarch
 BuildRequires: git
@@ -51,7 +51,7 @@ export SKIP_PIP_INSTALL=1
 %doc %{_docdir}/%{name}/inventory/sample/hosts.ini
 %config %{_sysconfdir}/%{name}/ansible.cfg
 %config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
-%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s_cluster.yml
+%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
 %license %{_docdir}/%{name}/LICENSE
 %{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
 %{_datarootdir}/%{name}/roles/
Some files were not shown because too many files have changed in this diff.