Compare commits
7 commits: master ... release-2.

SHA1: 67167bd8d2, fa0de9faa0, abe9b40602, b0ccda8a42, c8dad3f6c6, 5ec9ab7ec0, 73097aa39d

1418 changed files with 16780 additions and 57599 deletions
.ansible-lint

@@ -2,8 +2,13 @@
parseable: true
skip_list:
  # see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules

  # DO NOT add any other rules to this skip_list, instead use local `# noqa` with a comment explaining WHY it is necessary
  # The following rules throw errors.
  # These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose.
  - '301'
  - '305'
  - '306'
  - '404'
  - '503'

  # These rules are intentionally skipped:
  #

@@ -18,13 +23,3 @@ skip_list:
  # While it can be useful to have these metadata available, they are also available in the existing documentation.
  # (Disabled in May 2019)
  - '701'

  # [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
  # Meta roles in Kubespray don't need proper names
  # (Disabled in June 2021)
  - 'role-name'

  # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
  # In Kubespray we use variables that use camelCase to match their k8s counterparts
  # (Disabled in June 2021)
  - 'var-naming'
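The comment block above insists on local `# noqa` markers rather than new global skips. A minimal sketch of that pattern, assuming the numeric-rule `# noqa` syntax of this ansible-lint generation (the task itself is hypothetical, not from the repository):

```yaml
# Hypothetical task showing a local, documented skip instead of a global
# skip_list entry: rule 301 ("commands should not change things") is
# silenced only for this task, with the reason stated inline.
- name: Read installed kubelet version  # noqa 301 - read-only command
  command: kubelet --version
  register: kubelet_version
  changed_when: false
```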
.editorconfig (deleted)

@@ -1,15 +0,0 @@
root = true

[*.{yaml,yml,yml.j2,yaml.j2}]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true
charset = utf-8

[{Dockerfile}]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true
charset = utf-8
.github/ISSUE_TEMPLATE/bug-report.md (6 changes, vendored)

@@ -18,8 +18,6 @@ explain why.

- **Version of Ansible** (`ansible --version`):

- **Version of Python** (`python --version`):

**Kubespray version (commit) (`git rev-parse --short HEAD`):**

@@ -27,8 +25,8 @@ explain why.

**Network plugin used**:

**Full inventory with variables (`ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]"`):**
<!-- We recommend using snippets services like https://gist.github.com/ etc. -->
**Copy of your inventory file:**

**Command used to invoke ansible**:
.github/ISSUE_TEMPLATE/support.md (2 changes, vendored)

@@ -1,7 +1,7 @@
---
name: Support Request
about: Support request or question relating to Kubespray
labels: kind/support
labels: triage/support

---
.github/PULL_REQUEST_TEMPLATE.md (6 changes, vendored)

@@ -1,9 +1,9 @@
<!-- Thanks for sending a pull request! Here are some tips for you:

1. If this is your first time, please read our contributor guidelines: https://git.k8s.io/community/contributors/guide/first-contribution.md and developer guide https://git.k8s.io/community/contributors/devel/development.md
1. If this is your first time, please read our contributor guidelines: https://git.k8s.io/community/contributors/guide#your-first-contribution and developer guide https://git.k8s.io/community/contributors/devel/development.md#development-guide
2. Please label this pull request according to what type of issue you are addressing, especially if this is a release targeted pull request. For reference on required PR/issue labels, read here:
https://git.k8s.io/community/contributors/devel/sig-release/release.md#issuepr-kind-label
3. Ensure you have added or ran the appropriate tests for your PR: https://git.k8s.io/community/contributors/devel/sig-testing/testing.md
https://git.k8s.io/community/contributors/devel/release.md#issue-kind-label
3. Ensure you have added or ran the appropriate tests for your PR: https://git.k8s.io/community/contributors/devel/testing.md
4. If you want *faster* PR reviews, read how: https://git.k8s.io/community/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
5. Follow the instructions for writing a release note: https://git.k8s.io/community/contributors/guide/release-notes.md
6. If the PR is unfinished, see how to mark it: https://git.k8s.io/community/contributors/guide/pull-requests.md#marking-unfinished-pull-requests
.gitignore (18 changes, vendored)

@@ -1,12 +1,8 @@
.vagrant
*.retry
**/vagrant_ansible_inventory
*.iml
temp
contrib/offline/offline-files
contrib/offline/offline-files.tar.gz
.idea
.vscode
.tox
.cache
*.bak

@@ -14,19 +10,15 @@ contrib/offline/offline-files.tar.gz
*.tfstate.backup
.terraform/
contrib/terraform/aws/credentials.tfvars
.terraform.lock.hcl
/ssh-bastion.conf
**/*.sw[pon]
*~
vagrant/
plugins/mitogen
deploy.sh

# Ansible inventory
inventory/*
!inventory/local
!inventory/sample
!inventory/c12s-sample
inventory/*/artifacts/

# Byte-compiled / optimized / DLL files

@@ -105,13 +97,3 @@ target/
# virtualenv
venv/
ENV/

# molecule
roles/**/molecule/**/__pycache__/

# macOS
.DS_Store

# Temp location used by our scripts
scripts/tmp/
tmp.md
.gitlab-ci.yml

@@ -4,19 +4,17 @@ stages:
  - deploy-part1
  - moderator
  - deploy-part2
  - deploy-part3
  - deploy-gce
  - deploy-special

variables:
  KUBESPRAY_VERSION: v2.20.0
  FAILFASTCI_NAMESPACE: 'kargo-ci'
  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
  # DOCKER_HOST: tcp://localhost:2375
  ANSIBLE_FORCE_COLOR: "true"
  MAGIC: "ci check this"
  TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
  CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
  CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
  CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
  GS_ACCESS_KEY_ID: $GS_KEY
  GS_SECRET_ACCESS_KEY: $GS_SECRET
  CONTAINER_ENGINE: docker

@@ -27,42 +25,32 @@ variables:
  ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
  IDEMPOT_CHECK: "false"
  RESET_CHECK: "false"
  REMOVE_NODE_CHECK: "false"
  UPGRADE_TEST: "false"
  MITOGEN_ENABLE: "false"
  ANSIBLE_LOG_LEVEL: "-vv"
  RECOVER_CONTROL_PLANE_TEST: "false"
  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
  TERRAFORM_VERSION: 1.0.8
  ANSIBLE_MAJOR_VERSION: "2.11"
  LOG_LEVEL: "-vv"

before_script:
  - ./tests/scripts/rebase.sh
  - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
  - python -m pip uninstall -y ansible ansible-base ansible-core
  - python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
  - /usr/bin/python -m pip install -r tests/requirements.txt
  - mkdir -p /.ssh

.job: &job
  tags:
    - packet
  variables:
    KUBESPRAY_VERSION: v2.10.0
  image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
  artifacts:
    when: always
    paths:
      - cluster-dump/

.testcases: &testcases
  <<: *job
  retry: 1
  services:
    - docker:dind
  before_script:
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
    - ./tests/scripts/rebase.sh
    - ./tests/scripts/testcases_prepare.sh
  script:
    - ./tests/scripts/testcases_run.sh
  after_script:
    - chronic ./tests/scripts/testcases_cleanup.sh
    - ./tests/scripts/testcases_cleanup.sh

# For failfast, at least 1 job must be defined in .gitlab-ci.yml
# Premoderated with manual actions

@@ -78,7 +66,6 @@ ci-authorized:
include:
  - .gitlab-ci/lint.yml
  - .gitlab-ci/shellcheck.yml
  - .gitlab-ci/digital-ocean.yml
  - .gitlab-ci/terraform.yml
  - .gitlab-ci/packet.yml
  - .gitlab-ci/vagrant.yml
  - .gitlab-ci/molecule.yml
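The `.job: &job` / `<<: *job` pairing above is plain YAML anchoring and merging, which GitLab CI accepts in addition to `extends`. A minimal sketch of how a concrete job picks up the template (the job name and script are illustrative, not from this diff):

```yaml
.job: &job                # hidden template: the leading dot stops GitLab from running it
  tags:
    - packet
  image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION

example-job:              # hypothetical job for illustration
  <<: *job                # merge the template's keys into this mapping...
  script:                 # ...then add keys of its own
    - echo "runs with the template's runner tag and image"
```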
.gitlab-ci/digital-ocean.yml (19 changes, new file)

@@ -0,0 +1,19 @@
---
.do_variables: &do_variables
  PRIVATE_KEY: $DO_PRIVATE_KEY
  CI_PLATFORM: "do"
  SSH_USER: root

.do: &do
  extends: .testcases
  tags:
    - do

do_ubuntu-canal-ha:
  stage: deploy-part2
  extends: .do
  variables:
    <<: *do_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]
.gitlab-ci/gce.yml (247 changes, new file)

@@ -0,0 +1,247 @@
---
.gce_variables: &gce_variables
  GCE_USER: travis
  SSH_USER: $GCE_USER
  CLOUD_MACHINE_TYPE: "g1-small"
  CI_PLATFORM: "gce"
  PRIVATE_KEY: $GCE_PRIVATE_KEY

.cache: &cache
  cache:
    key: "$CI_BUILD_REF_NAME"
    paths:
      - downloads/
      - $HOME/.cache

.gce: &gce
  extends: .testcases
  <<: *cache
  variables:
    <<: *gce_variables
  tags:
    - gce
  except: ['triggers']
  only: [/^pr-.*$/]

.centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
  # stage: deploy-part1
  UPGRADE_TEST: "graceful"

.centos7_multus_calico_variables: &centos7_multus_calico_variables
  # stage: deploy-gce
  UPGRADE_TEST: "graceful"

# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
### PR JOBS PART1

gce_ubuntu18-flannel-aio:
  stage: deploy-part1
  <<: *gce
  when: manual

### PR JOBS PART2

gce_coreos-calico-aio:
  stage: deploy-gce
  <<: *gce
  when: on_success

gce_centos7-flannel-addons:
  stage: deploy-gce
  <<: *gce
  when: manual

### MANUAL JOBS

gce_centos-weave-kubeadm-sep:
  stage: deploy-gce
  extends: .gce
  variables:
    <<: *centos_weave_kubeadm_variables
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-weave-sep:
  stage: deploy-gce
  <<: *gce
  when: manual
  only: ['triggers']
  except: []

gce_coreos-calico-sep-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-canal-ha-triggers:
  stage: deploy-special
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_centos7-flannel-addons-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-weave-sep-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

# More builds for PRs/merges (manual) and triggers (auto)

gce_ubuntu-canal-ha:
  stage: deploy-special
  <<: *gce
  when: manual

gce_ubuntu-canal-kubeadm:
  stage: deploy-gce
  <<: *gce
  when: manual

gce_ubuntu-canal-kubeadm-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-flannel-ha:
  stage: deploy-gce
  <<: *gce
  when: manual

gce_centos-weave-kubeadm-triggers:
  stage: deploy-gce
  extends: .gce
  variables:
    <<: *centos_weave_kubeadm_variables
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-contiv-sep:
  stage: deploy-special
  <<: *gce
  when: manual

gce_coreos-cilium:
  stage: deploy-special
  <<: *gce
  when: manual

gce_ubuntu18-cilium-sep:
  stage: deploy-special
  <<: *gce
  when: manual

gce_rhel7-weave:
  stage: deploy-gce
  <<: *gce
  when: manual

gce_rhel7-weave-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_debian9-calico-upgrade:
  stage: deploy-gce
  <<: *gce
  when: manual

gce_debian9-calico-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_coreos-canal:
  stage: deploy-gce
  <<: *gce
  when: manual

gce_coreos-canal-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_rhel7-canal-sep:
  stage: deploy-special
  <<: *gce
  when: manual

gce_rhel7-canal-sep-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_centos7-calico-ha:
  stage: deploy-special
  <<: *gce
  when: manual

gce_centos7-calico-ha-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_centos7-kube-router:
  stage: deploy-special
  <<: *gce
  when: manual

gce_centos7-multus-calico:
  stage: deploy-gce
  extends: .gce
  variables:
    <<: *centos7_multus_calico_variables
  when: manual

gce_oracle-canal:
  stage: deploy-gce
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_opensuse-canal:
  stage: deploy-gce
  <<: *gce
  when: manual

# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
gce_coreos-alpha-weave-ha:
  stage: deploy-special
  <<: *gce
  when: manual

gce_coreos-kube-router:
  stage: deploy-special
  <<: *gce
  when: manual

gce_ubuntu-kube-router-sep:
  stage: deploy-special
  <<: *gce
  when: manual
.gitlab-ci/lint.yml

@@ -2,9 +2,6 @@
yamllint:
  extends: .job
  stage: unit-tests
  tags: [light]
  variables:
    LANG: C.UTF-8
  script:
    - yamllint --strict .
  except: ['triggers', 'master']

@@ -12,25 +9,23 @@ yamllint:
vagrant-validate:
  extends: .job
  stage: unit-tests
  tags: [light]
  variables:
    VAGRANT_VERSION: 2.2.19
  script:
    - ./tests/scripts/vagrant-validate.sh
    - curl -sL https://releases.hashicorp.com/vagrant/2.2.4/vagrant_2.2.4_x86_64.deb -o /tmp/vagrant_2.2.4_x86_64.deb
    - dpkg -i /tmp/vagrant_2.2.4_x86_64.deb
    - vagrant validate --ignore-provider
  except: ['triggers', 'master']

ansible-lint:
  extends: .job
  stage: unit-tests
  tags: [light]
  script:
    - ansible-lint -v
  # lint every yml/yaml file that looks like it contains Ansible plays
  script: |-
    grep -Rl '^- hosts: \|^  hosts: ' --include \*.yml --include \*.yaml . | xargs -P 4 -n 25 ansible-lint -v
  except: ['triggers', 'master']

syntax-check:
  extends: .job
  stage: unit-tests
  tags: [light]
  variables:
    ANSIBLE_INVENTORY: inventory/local-tests.cfg
    ANSIBLE_REMOTE_USER: root

@@ -46,45 +41,9 @@ syntax-check:

tox-inventory-builder:
  stage: unit-tests
  tags: [light]
  extends: .job
  before_script:
    - ./tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
  script:
    - pip3 install tox
    - pip install tox
    - cd contrib/inventory_builder && tox
  when: manual
  except: ['triggers', 'master']

markdownlint:
  stage: unit-tests
  tags: [light]
  image: node
  before_script:
    - npm install -g markdownlint-cli@0.22.0
  script:
    - markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md

check-readme-versions:
  stage: unit-tests
  tags: [light]
  image: python:3
  script:
    - tests/scripts/check_readme_versions.sh

check-typo:
  stage: unit-tests
  tags: [light]
  image: python:3
  script:
    - tests/scripts/check_typo.sh

ci-matrix:
  stage: unit-tests
  tags: [light]
  image: python:3
  script:
    - tests/scripts/md-table/test.sh
.gitlab-ci/molecule.yml (deleted)

@@ -1,86 +0,0 @@
---
.molecule:
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  services: []
  stage: deploy-part1
  before_script:
    - tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/molecule_run.sh
  after_script:
    - chronic ./tests/scripts/molecule_logs.sh
  artifacts:
    when: always
    paths:
      - molecule_logs/

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.molecule_periodic:
  only:
    variables:
      - $PERIODIC_CI_ENABLED
  allow_failure: true
  extends: .molecule

molecule_full:
  extends: .molecule_periodic

molecule_no_container_engines:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -e container-engine
  when: on_success

molecule_docker:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
  when: on_success

molecule_containerd:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/containerd
  when: on_success

molecule_cri-o:
  extends: .molecule
  stage: deploy-part2
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/cri-o
  when: on_success

# Stage 3 container engines don't get as much attention so allow them to fail
molecule_kata:
  extends: .molecule
  stage: deploy-part3
  allow_failure: true
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
  when: on_success

molecule_gvisor:
  extends: .molecule
  stage: deploy-part3
  allow_failure: true
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/gvisor
  when: on_success

molecule_youki:
  extends: .molecule
  stage: deploy-part3
  allow_failure: true
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/youki
  when: on_success
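All of these jobs funnel into `molecule_run.sh`, which executes the Molecule scenarios kept under each role. As a rough sketch of what such a scenario file conventionally looks like (driver, box, and names are assumptions for illustration, not taken from this diff):

```yaml
---
# Hypothetical roles/<role>/molecule/default/molecule.yml: Molecule brings
# up the platform, applies the role with the provisioner, then verifies.
driver:
  name: vagrant              # assumed; consistent with the vagrant CI image above
platforms:
  - name: instance-1         # illustrative VM name
    box: generic/ubuntu2004  # illustrative box
provisioner:
  name: ansible
verifier:
  name: ansible
```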
.gitlab-ci/packet.yml

@@ -1,328 +1,122 @@
---
.packet:
.packet_variables: &packet_variables
  CI_PLATFORM: "packet"
  SSH_USER: "kubespray"

.packet: &packet
  extends: .testcases
  variables:
    ANSIBLE_TIMEOUT: "120"
    CI_PLATFORM: packet
    SSH_USER: kubespray
    <<: *packet_variables
  tags:
    - packet
  except: [triggers]

# CI template for PRs
.packet_pr:
  only: [/^pr-.*$/]
  extends: .packet
  except: ['triggers']

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.packet_periodic:
  only:
    variables:
      - $PERIODIC_CI_ENABLED
  allow_failure: true
  extends: .packet

# The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-aio:
  stage: deploy-part1
  extends: .packet_pr
  when: on_success
.test-upgrade: &test-upgrade
  variables:
    RESET_CHECK: "true"
    UPGRADE_TEST: "graceful"

packet_ubuntu20-calico-aio-ansible-2_11:
packet_ubuntu18-calico-aio:
  stage: deploy-part1
  extends: .packet_periodic
  <<: *packet
  when: on_success
  variables:
    ANSIBLE_MAJOR_VERSION: "2.11"
    RESET_CHECK: "true"

# ### PR JOBS PART2

packet_ubuntu18-aio-docker:
packet_centos7-flannel-addons:
  stage: deploy-part2
  extends: .packet_pr
  <<: *packet
  when: on_success

packet_ubuntu20-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu20-calico-aio-hardening:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu18-calico-aio:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu22-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu22-calico-aio:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_centos7-flannel-addons-ha:
  extends: .packet_pr
  stage: deploy-part2
  when: on_success

packet_almalinux8-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: on_success

packet_ubuntu18-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: manual

packet_fedora35-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: manual

packet_ubuntu16-canal-ha:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_ubuntu16-canal-sep:
  stage: deploy-special
  extends: .packet_pr
  when: manual

packet_ubuntu16-flannel-ha:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_debian10-cilium-svc-proxy:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_debian10-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_debian10-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_debian11-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_debian11-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_centos7-calico-ha-once-localhost:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success
  variables:
    # This will instruct Docker not to start over TLS.
    DOCKER_TLS_CERTDIR: ""
  services:
    - docker:19.03.9-dind

packet_almalinux8-kube-ovn:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_almalinux8-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_rockylinux8-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_rockylinux9-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_almalinux8-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_fedora36-docker-weave:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_opensuse-canal:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_opensuse-docker-cilium:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

# ### MANUAL JOBS

packet_ubuntu16-docker-weave-sep:
packet_centos-weave-kubeadm-sep:
  stage: deploy-part2
  extends: .packet_pr
  <<: *packet
  when: on_success
  only: ['triggers']
  except: []

packet_ubuntu-weave-sep:
  stage: deploy-part2
  <<: *packet
  when: manual
  only: ['triggers']
  except: []

# # More builds for PRs/merges (manual) and triggers (auto)

packet_ubuntu-canal-ha:
  stage: deploy-special
  <<: *packet
  when: manual

packet_ubuntu-canal-kubeadm:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_ubuntu-flannel-ha:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_ubuntu-contiv-sep:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_ubuntu18-cilium-sep:
  stage: deploy-special
  extends: .packet_pr
  <<: *packet
  when: manual

packet_ubuntu18-flannel-ha:
packet_ubuntu18-flannel-containerd:
  stage: deploy-part2
  extends: .packet_pr
  <<: *packet
  when: manual

packet_ubuntu18-flannel-ha-once:
packet_debian9-macvlan-sep:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
  <<: *packet
  when: on_success

# Calico HA eBPF
packet_almalinux8-calico-ha-ebpf:
packet_debian9-calico-upgrade:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_debian9-macvlan:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
  <<: *packet
  when: on_success

packet_centos7-calico-ha:
  stage: deploy-part2
  extends: .packet_pr
  <<: *packet
  when: manual

packet_centos7-kube-ovn:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_centos7-kube-router:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_centos7-multus-calico:
  stage: deploy-part2
  extends: .packet_pr
  <<: *packet
  when: manual

packet_centos7-canal-ha:
packet_opensuse-canal:
  stage: deploy-part2
  extends: .packet_pr
  <<: *packet
  when: manual

packet_fedora36-docker-calico:
packet_oracle-7-canal:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success
  variables:
    RESET_CHECK: "true"

packet_fedora35-calico-selinux:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_fedora35-calico-swap-selinux:
  stage: deploy-part2
  extends: .packet_pr
  <<: *packet
  when: manual

packet_amazon-linux-2-aio:
packet_ubuntu-kube-router-sep:
  stage: deploy-part2
  extends: .packet_pr
  <<: *packet
  when: manual

packet_almalinux8-calico-nodelocaldns-secondary:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_fedora36-kube-ovn:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

# ### PR JOBS PART3
# Long jobs (45min+)

packet_centos7-weave-upgrade-ha:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    UPGRADE_TEST: basic

packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    UPGRADE_TEST: basic

# Calico HA Wireguard
packet_ubuntu20-calico-ha-wireguard:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_debian11-calico-upgrade:
  stage: deploy-part3
  extends: .packet_pr
  when: on_success
  variables:
    UPGRADE_TEST: graceful

packet_almalinux8-calico-remove-node:
  stage: deploy-part3
  extends: .packet_pr
  when: on_success
  variables:
    REMOVE_NODE_CHECK: "true"
    REMOVE_NODE_NAME: "instance-3"

packet_ubuntu20-calico-etcd-kubeadm:
  stage: deploy-part3
  extends: .packet_pr
  when: on_success

packet_debian11-calico-upgrade-once:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    UPGRADE_TEST: graceful

packet_ubuntu18-calico-ha-recover:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    RECOVER_CONTROL_PLANE_TEST: "true"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"

packet_ubuntu18-calico-ha-recover-noquorum:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    RECOVER_CONTROL_PLANE_TEST: "true"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"
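One side of this hunk attaches jobs through GitLab's `extends:` while the other uses the YAML merge key `<<: *packet`; the two compose differently. A sketch of the difference (job names hypothetical):

```yaml
.tmpl: &tmpl
  variables:
    A: "1"

uses-extends:          # GitLab-level deep merge: nested hashes are combined,
  extends: .tmpl       # so this job ends up with both A and B
  variables:
    B: "2"

uses-anchor:           # YAML-level merge: the local `variables` mapping
  <<: *tmpl            # replaces the template's wholesale, leaving only B
  variables:
    B: "2"
```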
.gitlab-ci/shellcheck.yml

@@ -2,15 +2,14 @@
shellcheck:
  extends: .job
  stage: unit-tests
  tags: [light]
  variables:
    SHELLCHECK_VERSION: v0.7.1
    SHELLCHECK_VERSION: v0.6.0
  before_script:
    - ./tests/scripts/rebase.sh
    - curl --silent --location "https://github.com/koalaman/shellcheck/releases/download/"${SHELLCHECK_VERSION}"/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv
    - curl --silent "https://storage.googleapis.com/shellcheck/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv
    - cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
    - shellcheck --version
  script:
    # Run shellcheck for all *.sh
    - find . -name '*.sh' -not -path './.git/*' | xargs shellcheck --severity error
    # Run shellcheck for all *.sh except contrib/
    - find . -name '*.sh' -not -path './contrib/*' | xargs shellcheck --severity error
  except: ['triggers', 'master']
.gitlab-ci/terraform.yml

@@ -3,44 +3,36 @@
.terraform_install:
  extends: .job
  before_script:
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
    - ./tests/scripts/rebase.sh
    - ./tests/scripts/testcases_prepare.sh
    - ./tests/scripts/terraform_install.sh
    # Set Ansible config
    - cp ansible.cfg ~/.ansible.cfg
    # Prepare inventory
    - cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars .
    - if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
    - cp contrib/terraform/$PROVIDER/sample-inventory/$VARIABLEFILE .
    - ln -s contrib/terraform/$PROVIDER/hosts
    - terraform -chdir="contrib/terraform/$PROVIDER" init
    - terraform init contrib/terraform/$PROVIDER
    # Copy SSH keypair
    - mkdir -p ~/.ssh
    - echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
    - chmod 400 ~/.ssh/id_rsa
    - echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
    - mkdir -p contrib/terraform/$PROVIDER/group_vars
    # Random subnet to avoid routing conflicts
    - export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"

.terraform_validate:
  extends: .terraform_install
  stage: unit-tests
  tags: [light]
  only: ['master', /^pr-.*$/]
  script:
    - terraform -chdir="contrib/terraform/$PROVIDER" validate
    - terraform -chdir="contrib/terraform/$PROVIDER" fmt -check -diff
    - if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
    - terraform validate -var-file=$VARIABLEFILE contrib/terraform/$PROVIDER
    - terraform fmt -check -diff contrib/terraform/$PROVIDER

.terraform_apply:
  extends: .terraform_install
  tags: [light]
  stage: deploy-part3
  stage: deploy-part2
  when: manual
  only: [/^pr-.*$/]
  artifacts:
    when: always
    paths:
      - cluster-dump/
  variables:
    ANSIBLE_INVENTORY_UNPARSED_FAILED: "true"
    ANSIBLE_INVENTORY: hosts

@@ -51,76 +43,56 @@
    - tests/scripts/testcases_run.sh
  after_script:
    # Cleanup regardless of exit code
    - chronic ./tests/scripts/testcases_cleanup.sh
    - ./tests/scripts/testcases_cleanup.sh

tf-validate-openstack:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    TF_VERSION: 0.12.6
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-metal:
tf-validate-packet:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: metal
    TF_VERSION: 0.11.11
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-aws:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    TF_VERSION: 0.11.11
    PROVIDER: aws
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-exoscale:
  extends: .terraform_validate
tf-packet-ubuntu16-default:
  extends: .terraform_apply
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: exoscale

tf-validate-vsphere:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: vsphere
    TF_VERSION: 0.11.11
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME
    TF_VAR_number_of_k8s_masters: "1"
    TF_VAR_number_of_k8s_nodes: "1"
    TF_VAR_plan_k8s_masters: t1.small.x86
    TF_VAR_plan_k8s_nodes: t1.small.x86
    TF_VAR_facility: ewr1
    TF_VAR_public_key_path: ""
    TF_VAR_operating_system: ubuntu_16_04

tf-validate-upcloud:
  extends: .terraform_validate
tf-packet-ubuntu18-default:
  extends: .terraform_apply
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: upcloud
    TF_VERSION: 0.11.11
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME

# tf-packet-ubuntu16-default:
#   extends: .terraform_apply
#   variables:
#     TF_VERSION: $TERRAFORM_VERSION
#     PROVIDER: packet
#     CLUSTER: $CI_COMMIT_REF_NAME
#     TF_VAR_number_of_k8s_masters: "1"
#     TF_VAR_number_of_k8s_nodes: "1"
#     TF_VAR_plan_k8s_masters: t1.small.x86
#     TF_VAR_plan_k8s_nodes: t1.small.x86
#     TF_VAR_facility: ewr1
#     TF_VAR_public_key_path: ""
#     TF_VAR_operating_system: ubuntu_16_04
#
# tf-packet-ubuntu18-default:
#   extends: .terraform_apply
#   variables:
#     TF_VERSION: $TERRAFORM_VERSION
#     PROVIDER: packet
#     CLUSTER: $CI_COMMIT_REF_NAME
#     TF_VAR_number_of_k8s_masters: "1"
#     TF_VAR_number_of_k8s_nodes: "1"
#     TF_VAR_plan_k8s_masters: t1.small.x86
#     TF_VAR_plan_k8s_nodes: t1.small.x86
#     TF_VAR_facility: ams1
#     TF_VAR_public_key_path: ""
#     TF_VAR_operating_system: ubuntu_18_04
    TF_VAR_number_of_k8s_masters: "1"
    TF_VAR_number_of_k8s_nodes: "1"
    TF_VAR_plan_k8s_masters: t1.small.x86
    TF_VAR_plan_k8s_nodes: t1.small.x86
    TF_VAR_facility: ams1
    TF_VAR_public_key_path: ""
    TF_VAR_operating_system: ubuntu_18_04

.ovh_variables: &ovh_variables
  OS_AUTH_URL: https://auth.cloud.ovh.net/v3

@@ -133,103 +105,58 @@ tf-validate-upcloud:
  OS_INTERFACE: public
  OS_IDENTITY_API_VERSION: "3"

# Elastx is generously donating resources for Kubespray on Openstack CI
# Contacts: @gix @bl0m1
.elastx_variables: &elastx_variables
  OS_AUTH_URL: https://ops.elastx.cloud:5000
  OS_PROJECT_ID: 564c6b461c6b44b1bb19cdb9c2d928e4
  OS_PROJECT_NAME: kubespray_ci
  OS_USER_DOMAIN_NAME: Default
  OS_PROJECT_DOMAIN_ID: default
  OS_USERNAME: kubespray@root314.com
  OS_REGION_NAME: se-sto
  OS_INTERFACE: public
  OS_IDENTITY_API_VERSION: "3"
  TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"

tf-elastx_cleanup:
  stage: unit-tests
  tags: [light]
  image: python
  variables:
    <<: *elastx_variables
  before_script:
    - pip install -r scripts/openstack-cleanup/requirements.txt
  script:
    - ./scripts/openstack-cleanup/main.py

tf-elastx_ubuntu18-calico:
tf-ovh_ubuntu18-calico:
  extends: .terraform_apply
  stage: deploy-part3
  when: on_success
  allow_failure: true
  variables:
    <<: *elastx_variables
    TF_VERSION: $TERRAFORM_VERSION
    <<: *ovh_variables
    TF_VERSION: 0.12.6
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"
    SSH_USER: ubuntu
    TF_VAR_number_of_k8s_masters: "1"
    TF_VAR_number_of_k8s_masters_no_floating_ip: "0"
    TF_VAR_number_of_k8s_masters: "0"
    TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
    TF_VAR_number_of_etcd: "0"
    TF_VAR_number_of_k8s_nodes: "1"
    TF_VAR_number_of_k8s_nodes_no_floating_ip: "0"
    TF_VAR_number_of_k8s_nodes: "0"
    TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
    TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
    TF_VAR_number_of_bastions: "0"
    TF_VAR_number_of_k8s_masters_no_etcd: "0"
    TF_VAR_floatingip_pool: "elx-public1"
    TF_VAR_dns_nameservers: '["1.1.1.1", "8.8.8.8", "8.8.4.4"]'
    TF_VAR_use_access_ip: "0"
    TF_VAR_external_net: "600b8501-78cb-4155-9c9f-23dfcba88828"
    TF_VAR_network_name: "ci-$CI_JOB_ID"
    TF_VAR_az_list: '["sto1"]'
    TF_VAR_az_list_node: '["sto1"]'
    TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b  # v1-standard-2
    TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b  # v1-standard-2
    TF_VAR_image: ubuntu-18.04-server-latest
    TF_VAR_use_neutron: "0"
    TF_VAR_floatingip_pool: "Ext-Net"
    TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
    TF_VAR_network_name: "Ext-Net"
    TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229"  # s1-8
    TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229"  # s1-8
    TF_VAR_image: "Ubuntu 18.04"
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

# OVH voucher expired, commenting job until things are sorted out

# tf-ovh_cleanup:
#   stage: unit-tests
#   tags: [light]
#   image: python
#   environment: ovh
#   variables:
#     <<: *ovh_variables
#   before_script:
#     - pip install -r scripts/openstack-cleanup/requirements.txt
#   script:
#     - ./scripts/openstack-cleanup/main.py

# tf-ovh_ubuntu18-calico:
#   extends: .terraform_apply
#   when: on_success
#   environment: ovh
#   variables:
#     <<: *ovh_variables
#     TF_VERSION: $TERRAFORM_VERSION
#     PROVIDER: openstack
#     CLUSTER: $CI_COMMIT_REF_NAME
#     ANSIBLE_TIMEOUT: "60"
#     SSH_USER: ubuntu
#     TF_VAR_number_of_k8s_masters: "0"
#     TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
#     TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
#     TF_VAR_number_of_etcd: "0"
#     TF_VAR_number_of_k8s_nodes: "0"
#     TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
#     TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
#     TF_VAR_number_of_bastions: "0"
#     TF_VAR_number_of_k8s_masters_no_etcd: "0"
#     TF_VAR_use_neutron: "0"
#     TF_VAR_floatingip_pool: "Ext-Net"
#     TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
#     TF_VAR_network_name: "Ext-Net"
#     TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229"  # s1-8
#     TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229"  # s1-8
#     TF_VAR_image: "Ubuntu 18.04"
#     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
tf-ovh_coreos-calico:
  extends: .terraform_apply
  when: on_success
  variables:
    <<: *ovh_variables
    TF_VERSION: 0.12.6
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"
    SSH_USER: core
    TF_VAR_number_of_k8s_masters: "0"
    TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
    TF_VAR_number_of_etcd: "0"
    TF_VAR_number_of_k8s_nodes: "0"
    TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
    TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
    TF_VAR_number_of_bastions: "0"
    TF_VAR_number_of_k8s_masters_no_etcd: "0"
    TF_VAR_use_neutron: "0"
    TF_VAR_floatingip_pool: "Ext-Net"
    TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
    TF_VAR_network_name: "Ext-Net"
    TF_VAR_flavor_k8s_master: "4d4fd037-9493-4f2b-9afe-b542b5248eac"  # b2-7
    TF_VAR_flavor_k8s_node: "4d4fd037-9493-4f2b-9afe-b542b5248eac"  # b2-7
    TF_VAR_image: "CoreOS Stable"
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
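Every `tf-validate-*` job above has the same shape: the shared `.terraform_validate` template does the work, and each job only pins a provider and a Terraform version. A sketch of what adding one more would look like (the provider name is hypothetical):

```yaml
tf-validate-example:               # hypothetical job for illustration
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: example              # would need a matching contrib/terraform/example/
    CLUSTER: $CI_COMMIT_REF_NAME
```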
.gitlab-ci/vagrant.yml (deleted)

@@ -1,67 +0,0 @@
---
.vagrant:
  extends: .testcases
  variables:
    CI_PLATFORM: "vagrant"
    SSH_USER: "vagrant"
    VAGRANT_DEFAULT_PROVIDER: "libvirt"
    KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  services: []
  before_script:
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/testcases_run.sh
  after_script:
    - chronic ./tests/scripts/testcases_cleanup.sh
  allow_failure: true

vagrant_ubuntu18-calico-dual-stack:
  stage: deploy-part2
  extends: .vagrant
  when: on_success

vagrant_ubuntu18-flannel:
  stage: deploy-part2
  extends: .vagrant
  when: on_success

vagrant_ubuntu18-weave-medium:
  stage: deploy-part2
  extends: .vagrant
  when: manual

vagrant_ubuntu20-flannel:
  stage: deploy-part2
  extends: .vagrant
  when: on_success
  allow_failure: false

vagrant_ubuntu16-kube-router-sep:
  stage: deploy-part2
  extends: .vagrant
  when: manual

# Service proxy test fails connectivity testing
vagrant_ubuntu16-kube-router-svc-proxy:
  stage: deploy-part2
  extends: .vagrant
  when: manual

vagrant_fedora35-kube-router:
  stage: deploy-part2
  extends: .vagrant
  when: on_success

vagrant_centos7-kube-router:
  stage: deploy-part2
  extends: .vagrant
  when: manual
.markdownlint.yaml (deleted)

@@ -1,3 +0,0 @@
---
MD013: false
MD029: false
.pre-commit-config.yaml (deleted)

@@ -1,48 +0,0 @@
---
repos:
  - repo: https://github.com/adrienverge/yamllint.git
    rev: v1.27.1
    hooks:
      - id: yamllint
        args: [--strict]

  - repo: https://github.com/markdownlint/markdownlint
    rev: v0.11.0
    hooks:
      - id: markdownlint
        args: [-r, "~MD013,~MD029"]
        exclude: "^.git"

  - repo: local
    hooks:
      - id: ansible-lint
        name: ansible-lint
        entry: ansible-lint -v
        language: python
        pass_filenames: false
        additional_dependencies:
          - .[community]

      - id: ansible-syntax-check
        name: ansible-syntax-check
        entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
        language: python
        files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"

      - id: tox-inventory-builder
        name: tox-inventory-builder
        entry: bash -c "cd contrib/inventory_builder && tox"
        language: python
        pass_filenames: false

      - id: check-readme-versions
        name: check-readme-versions
        entry: tests/scripts/check_readme_versions.sh
        language: script
        pass_filenames: false

      - id: ci-matrix
        name: ci-matrix
        entry: tests/scripts/md-table/test.sh
        language: script
        pass_filenames: false
.yamllint

@@ -1,9 +1,6 @@
---
extends: default

ignore: |
  .git/

rules:
  braces:
    min-spaces-inside: 0
CONTRIBUTING.md

@@ -2,45 +2,10 @@

## How to become a contributor and submit your own code

### Environment setup

It is recommended to use filters to manage your GitHub email notifications; see [examples for setting filters to Kubernetes Github notifications](https://github.com/kubernetes/community/blob/master/communication/best-practices.md#examples-for-setting-filters-to-kubernetes-github-notifications)

To install development dependencies you can set up a python virtual env with the necessary dependencies:

```ShellSession
virtualenv venv
source venv/bin/activate
pip install -r tests/requirements.txt
```

#### Linting

Kubespray uses a [pre-commit](https://pre-commit.com) hook configuration to run several linters; please install this tool and use it to run validation tests before submitting a PR.

```ShellSession
pre-commit install
pre-commit run -a  # To run pre-commit hooks on all files in the repository, even if they were not modified
```

#### Molecule

[molecule](https://github.com/ansible-community/molecule) is designed to help the development and testing of Ansible roles. In Kubespray you can run it for all roles with `./tests/scripts/molecule_run.sh`, or for a specific role (the one you are working on) with `molecule test` from the role directory (`cd roles/my-role`).

When developing or debugging a role it can be useful to run `molecule create` and `molecule converge` separately. Then you can use `molecule login` to SSH into the test environment.

#### Vagrant

Vagrant with the VirtualBox or libvirt driver helps you quickly spin up test clusters to test things end to end. See [README.md#vagrant](README.md)

### Contributing A Patch

1. Submit an issue describing your proposed change to the repo in question.
2. The [repo owners](OWNERS) will respond to your issue promptly.
3. Fork the desired repo, develop and test your code changes.
4. Install [pre-commit](https://pre-commit.com) and set it up in your development repo.
5. Address any pre-commit validation failures.
6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
7. Submit a pull request.
8. Work with the reviewers on their suggestions.
9. Make sure to rebase onto the HEAD of your target branch and squash unnecessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before the final merge of your contribution.
4. Sign the CNCF CLA (https://git.k8s.io/community/CLA.md#the-contributor-license-agreement)
5. Submit a pull request.
Dockerfile (49 changes)

@@ -1,37 +1,18 @@
# Use immutable image tags rather than mutable tags (like ubuntu:20.04)
FROM ubuntu:focal-20220531

ARG ARCH=amd64
ARG TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN apt update -y \
    && apt install -y \
    libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
    ca-certificates curl gnupg2 software-properties-common python3-pip unzip rsync git \
    && rm -rf /var/lib/apt/lists/*
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
    && add-apt-repository \
    "deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) \
    stable" \
    && apt update -y && apt-get install --no-install-recommends -y docker-ce \
    && rm -rf /var/lib/apt/lists/*

# Some tools like yamllint need this
# Pip needs this as well at the moment to install ansible
# (and potentially other packages)
# See: https://github.com/pypa/pip/issues/10219
ENV LANG=C.UTF-8
FROM ubuntu:18.04

RUN mkdir /kubespray
WORKDIR /kubespray
RUN apt update -y && \
    apt install -y \
    libssl-dev python3-dev sshpass apt-transport-https jq \
    ca-certificates curl gnupg2 software-properties-common python3-pip rsync
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
    add-apt-repository \
    "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) \
    stable" \
    && apt update -y && apt-get install docker-ce -y
COPY . .
RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \
    && /usr/bin/python3 -m pip install --no-cache-dir -r tests/requirements.txt \
    && python3 -m pip install --no-cache-dir -r requirements.txt \
    && update-alternatives --install /usr/bin/python python /usr/bin/python3 1

RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
    && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
    && chmod a+x kubectl \
    && mv kubectl /usr/local/bin/kubectl
RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.4/bin/linux/amd64/kubectl \
    && chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl
Makefile (4 changes)

@@ -1,7 +1,5 @@
mitogen:
	@echo Mitogen support is deprecated.
	@echo Please run the following command manually:
	@echo ansible-playbook -c local mitogen.yml -vv
	ansible-playbook -c local mitogen.yaml -vv
clean:
	rm -rf dist/
	rm *.retry
OWNERS (2 changes)

@@ -4,5 +4,3 @@ approvers:
  - kubespray-approvers
reviewers:
  - kubespray-reviewers
emeritus_approvers:
  - kubespray-emeritus_approvers
OWNERS_ALIASES

@@ -1,26 +1,21 @@
aliases:
  kubespray-approvers:
    - mattymo
    - chadswen
    - mirwan
    - miouge1
    - luckysb
    - floryut
    - oomichi
    - cristicalin
    - liupeng0518
    - yankay
  kubespray-reviewers:
    - holmsten
    - bozzo
    - eppo
    - oomichi
    - jayonlau
    - cristicalin
    - liupeng0518
    - yankay
  kubespray-emeritus_approvers:
    - riverzhang
    - atoms
    - ant31
    - mattymo
    - atoms
    - chadswen
    - rsmitty
    - bogdando
    - bradbeam
    - woopstar
    - riverzhang
    - holser
    - smana
    - verwilst
    - miouge1
  kubespray-reviewers:
    - jjungnickel
    - archifleks
    - chapsuk
    - mirwan
    - holmsten
317
README.md
317
README.md
|
@ -1,17 +1,19 @@
|
|||
# Deploy a Production Ready Kubernetes Cluster
|
||||
|
||||
![Kubernetes Logo](https://raw.githubusercontent.com/kubernetes-sigs/kubespray/master/docs/img/kubernetes-logo.png)
|
||||
|
||||
If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
|
||||
Deploy a Production Ready Kubernetes Cluster
|
||||
============================================
|
||||
|
||||
If you have questions, check the [documentation](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
|
||||
You can get your invite [here](http://slack.k8s.io/)
|
||||
|
||||
- Can be deployed on **[AWS](docs/aws.md), GCE, [Azure](docs/azure.md), [OpenStack](docs/openstack.md), [vSphere](docs/vsphere.md), [Equinix Metal](docs/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
|
||||
- **Highly available** cluster
|
||||
- **Composable** (Choice of the network plugin for instance)
|
||||
- Supports most popular **Linux distributions**
|
||||
- **Continuous integration tests**
|
||||
- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Packet (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
|
||||
- **Highly available** cluster
|
||||
- **Composable** (Choice of the network plugin for instance)
|
||||
- Supports most popular **Linux distributions**
|
||||
- **Continuous integration tests**
|
||||
|
||||
## Quick Start
|
||||
Quick Start
|
||||
-----------
|
||||
|
||||
To deploy the cluster you can use :
|
||||
|
||||
|
@ -19,243 +21,192 @@ To deploy the cluster you can use :
|
|||
|
||||
#### Usage
|
||||
|
||||
Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
|
||||
then run the following steps:
|
||||
# Install dependencies from ``requirements.txt``
|
||||
sudo pip install -r requirements.txt
|
||||
|
||||
```ShellSession
# Copy ``inventory/sample`` as ``inventory/mycluster``
cp -rfp inventory/sample inventory/mycluster

# Update Ansible inventory file with inventory builder
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}

# Review and change parameters under ``inventory/mycluster/group_vars``
cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/,
# installing packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```

Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
As a consequence, the `ansible-playbook` command will fail with:

```raw
ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
```

probably pointing to a task that depends on a module present in requirements.txt.

One way of solving this would be to uninstall the Ansible package and then install it via pip, but this is not always possible.
A workaround consists of setting the `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables to the `ansible/modules` and `ansible/module_utils` subdirectories of the pip packages installation location (shown in the Location field of the output of `pip show [package]`) before executing `ansible-playbook`.
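
For example, a minimal sketch of that workaround; the Location path below is only an assumption, use the one `pip show` reports on your system:

```ShellSession
# Find where pip installed ansible; the Location line differs per system
pip show ansible | grep Location
# Point Ansible at the pip-installed modules before running the playbook
export ANSIBLE_LIBRARY=/usr/local/lib/python2.7/dist-packages/ansible/modules
export ANSIBLE_MODULE_UTILS=/usr/local/lib/python2.7/dist-packages/ansible/module_utils
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```
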
A simple way to ensure you get all the correct versions of Ansible is to use the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:

```ShellSession
git checkout v2.20.0
docker pull quay.io/kubespray/kubespray:v2.20.0
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
  --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
  quay.io/kubespray/kubespray:v2.20.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```

### Vagrant

For Vagrant we need to install python dependencies for provisioning tasks.
Check if Python and pip are installed:

```ShellSession
python -V && pip -V
```

If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>

Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible),
then run the following step:

```ShellSession
vagrant up
```
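
Vagrant also reads optional overrides from `vagrant/config.rb` before provisioning (see the Vagrantfile section below); a minimal, purely illustrative sketch using settings documented there:

```ShellSession
# Hypothetical vagrant/config.rb; every setting shown falls back to a default
cat > vagrant/config.rb <<'EOF'
$num_instances = 3
$os = "ubuntu1804"
$network_plugin = "flannel"
$vm_memory = 2048
EOF
vagrant up
```
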
## Documents

- [Requirements](#requirements)
- [Kubespray vs ...](docs/comparisons.md)
- [Getting started](docs/getting-started.md)
- [Setting up your first cluster](docs/setting-up-your-first-cluster.md)
- [Ansible inventory and tags](docs/ansible.md)
- [Integration with existing ansible repo](docs/integration.md)
- [Deployment data variables](docs/vars.md)
- [DNS stack](docs/dns-stack.md)
- [HA mode](docs/ha-mode.md)
- [Network plugins](#network-plugins)
- [Vagrant install](docs/vagrant.md)
- [Flatcar Container Linux bootstrap](docs/flatcar.md)
- [Fedora CoreOS bootstrap](docs/fcos.md)
- [Debian Jessie setup](docs/debian.md)
- [openSUSE setup](docs/opensuse.md)
- [Downloaded artifacts](docs/downloads.md)
- [Cloud providers](docs/cloud.md)
- [OpenStack](docs/openstack.md)
- [AWS](docs/aws.md)
- [Azure](docs/azure.md)
- [vSphere](docs/vsphere.md)
- [Equinix Metal](docs/equinix-metal.md)
- [Large deployments](docs/large-deployments.md)
- [Adding/replacing a node](docs/nodes.md)
- [Upgrades basics](docs/upgrades.md)
- [Air-Gap installation](docs/offline-environment.md)
- [NTP](docs/ntp.md)
- [Hardening](docs/hardening.md)
- [Mirror](docs/mirror.md)
- [Roadmap](docs/roadmap.md)
## Supported Linux Distributions

- **Flatcar Container Linux by Kinvolk**
- **Debian** Bullseye, Buster, Jessie, Stretch
- **Ubuntu** 16.04, 18.04, 20.04, 22.04
- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
- **Fedora** 35, 36
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed
- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
- **Alma Linux** [8, 9](docs/centos.md#centos-8)
- **Rocky Linux** [8, 9](docs/centos.md#centos-8)
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md))
- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md))

Note: Upstart/SysV init based OS types are not supported.
## Supported Components

- Core
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.5
  - [etcd](https://github.com/etcd-io/etcd) v3.5.6
  - [docker](https://www.docker.com/) v20.10 (see note)
  - [containerd](https://containerd.io/) v1.6.14
  - [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
  - [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
  - [calico](https://github.com/projectcalico/calico) v3.24.5
  - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
  - [cilium](https://github.com/cilium/cilium) v1.12.1
  - [flannel](https://github.com/flannel-io/flannel) v0.19.2
  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
  - [multus](https://github.com/intel/multus-cni) v3.8
  - [weave](https://github.com/weaveworks/weave) v2.8.1
  - [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.5
- Application
  - [cert-manager](https://github.com/jetstack/cert-manager) v1.10.1
  - [coredns](https://github.com/coredns/coredns) v1.9.3
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.5.1
  - [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
  - [argocd](https://argoproj.github.io/) v2.4.16
  - [helm](https://helm.sh/) v3.9.4
  - [metallb](https://metallb.universe.tf/) v0.12.1
  - [registry](https://github.com/distribution/distribution) v2.8.1
- Storage Plugin
  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
  - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
  - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
  - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
  - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
  - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22
  - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
## Container Runtime Notes

- The list of available docker versions is 18.09, 19.03 and 20.10. The recommended docker version is 20.10. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pin (see the sketch below).
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)
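
As an illustration of such pinning; the package names are assumptions and vary by distro and runtime:

```ShellSession
# Debian/Ubuntu: hold the container runtime packages at their current version
sudo apt-mark hold docker-ce docker-ce-cli containerd.io

# CentOS/RHEL: lock the installed version with the yum versionlock plugin
sudo yum install -y yum-plugin-versionlock
sudo yum versionlock add docker-ce docker-ce-cli containerd.io
```
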
## Requirements

- **Minimum required version of Kubernetes is v1.23**
- **Ansible v2.11+, Jinja 2.11+ and python-netaddr installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding**.
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
- **Your ssh key must be copied** to all the servers that are part of your inventory.
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
  In order to avoid any issue during deployment you should disable your firewall.
- If kubespray is run from a non-root user account, a correct privilege escalation method
  should be configured on the target servers. Then the `ansible_become` flag
  or command parameters `--become or -b` should be specified (see the sketch after this list).
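
A quick, non-authoritative sketch of verifying these requirements from the control machine:

```ShellSession
# Check the Ansible, Jinja and python-netaddr versions on the control machine
ansible --version
python3 -c "import jinja2, netaddr; print(jinja2.__version__)"
# From a non-root account, pass --become so privilege escalation is used
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```
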
Hardware:
These limits are safeguarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.

- Master
  - Memory: 1500 MB
- Node
  - Memory: 1024 MB
## Network Plugins

You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)

- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.

- [Calico](https://docs.projectcalico.org/latest/introduction/) is a networking and network policy provider. Calico supports a flexible set of networking options
  designed to give you the most efficient networking across a range of situations, including non-overlay
  and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
  pods, and (if using Istio and Envoy) applications at the service mesh layer.

- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.

- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.

- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
  (Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).

- [kube-ovn](docs/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.

- [kube-router](docs/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
  simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if set up to replace kube-proxy),
  iptables for network policies, and BGP for pods L3 networking (with optional BGP peering with out-of-cluster BGP peers).
  It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs.

- [macvlan](docs/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique MAC and IP address, connected directly to the physical (layer 2) network.

- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.

The choice is defined with the variable `kube_network_plugin`. There is also an
option to leverage built-in cloud provider networking instead.
See also [Network checker](docs/netcheck.md).
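
For example, a minimal sketch of setting that variable for a run; the group_vars path matches the Quick Start above:

```ShellSession
# Inspect the default in the cluster group_vars ...
grep kube_network_plugin inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
# ... or override it on the command line for a single run
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root \
  -e kube_network_plugin=cilium cluster.yml
```
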
## Ingress Plugins

- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.

- [metallb](docs/metallb.md): the MetalLB bare-metal service LoadBalancer provider.

## Community docs and resources

- [kubernetes.io/docs/setup/production-environment/tools/kubespray/](https://kubernetes.io/docs/setup/production-environment/tools/kubespray/)
- [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=CJ5G4GpqDy0)

## Tools and projects on top of Kubespray

- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
- [Kubean](https://github.com/kubean-io/kubean)

## CI Tests

[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/pipeline.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)

CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).

See the [test matrix](docs/test_cases.md) for details.
RELEASE.md
----------

@ -2,82 +2,39 @@
The Kubespray Project is released on an as-needed basis. The process is as follows:

1. An issue is proposing a new release with a changelog since the last release. Please see [a good sample issue](https://github.com/kubernetes-sigs/kubespray/issues/8325)
2. At least one of the [approvers](OWNERS_ALIASES) must approve this release
3. The `kube_version_min_required` variable is set to `n-1`
4. Remove hashes for [EOL versions](https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
5. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
6. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
7. An approver creates a release branch in the form `release-X.Y` (see the sketch below)
8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
9. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
10. The release issue is closed
11. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
12. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
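
For illustration, steps 6-7 correspond roughly to the following git operations; the version numbers are hypothetical and the tag itself may instead be created through the GitHub release UI:

```ShellSession
# Create the release branch from master and push it (requires push rights)
git checkout master
git checkout -b release-2.21
git push origin release-2.21
```
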
## Major/minor releases and milestones

* For major releases (vX.Y) Kubespray maintains one branch (`release-X.Y`). Minor releases (vX.Y.Z) are available only as tags.

* Security patches and bugs might be backported.

* Fixes for major releases (vX.Y) and minor releases (vX.Y.Z) are delivered
  via maintenance releases (vX.Y.Z) and assigned to the corresponding open
  [GitHub milestone](https://github.com/kubernetes-sigs/kubespray/milestones).
  That milestone remains open for the major/minor releases support lifetime,
  which ends once the milestone is closed. Then only a next major or minor release
  can be done.

* Kubespray major and minor releases are bound to the given `kube_version` major/minor
  version numbers and other components' arbitrary versions, like etcd or network plugins.
  Older or newer component versions are not supported and not tested for the given
  release (even if included in the checksum variables, like `kubeadm_checksums`).

* There are no unstable releases and no APIs, thus Kubespray doesn't follow
  [semver](https://semver.org/). Every version describes only a stable release.
  Breaking changes, if any introduced by changed defaults or non-contrib ansible roles'
  playbooks, shall be described in the release notes. Other breaking changes, if any in
  the contributed addons or bound versions of Kubernetes and other components, are
  considered out of Kubespray scope and are up to the components' teams to deal with and
  document.

* Minor releases can change components' versions, but not the major `kube_version`.
  Greater `kube_version` requires a new major or minor release. For example, if Kubespray v2.0.0
  is bound to `kube_version: 1.4.x`, `calico_version: 0.22.0`, `etcd_version: v3.0.6`,
  then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1
  and *any* changes to other components, like etcd v4, or calico 1.2.3.
  And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively.
## Release note creation

You can create a release note with:

```shell
export GITHUB_TOKEN=<your-github-token>
export ORG=kubernetes-sigs
export REPO=kubespray
release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --dependencies=false --output=/tmp/kubespray-release-note --required-author=""
```

If the release note file (/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label (`kind/feature`, etc.).
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note.
## Container image creation

The container image `quay.io/kubespray/kubespray:vX.Y.Z` can be created from the Dockerfile in the kubespray root directory:

```shell
cd kubespray/
nerdctl build -t quay.io/kubespray/kubespray:vX.Y.Z .
nerdctl push quay.io/kubespray/kubespray:vX.Y.Z
```

The container image `quay.io/kubespray/vagrant:vX.Y.Z` can be created from build.sh of test-infra/vagrant-docker/:

```shell
cd kubespray/test-infra/vagrant-docker/
./build vX.Y.Z
```

Please note that the above operation requires the permission to push container images into quay.io/kubespray/.
If you don't have the permission, please ask for it on the #kubespray-dev channel.
SECURITY_CONTACTS
-----------------

@ -1,15 +1,13 @@
# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Committee to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
mattymo
floryut
oomichi
cristicalin
atoms
Vagrantfile (vendored)
----------------------

@ -7,81 +7,63 @@ require 'fileutils'
Vagrant.require_version ">= 2.0.0"

CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb')

FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json"

# Uniq disk UUID for libvirt
DISK_UUID = Time.now.utc.to_i

SUPPORTED_OS = {
  "flatcar-stable" => {box: "flatcar-stable", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["stable"]},
  "flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
  "flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
  "flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
  "ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
  "ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
  "ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
  "centos" => {box: "centos/7", user: "vagrant"},
  "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
  "centos8" => {box: "centos/8", user: "vagrant"},
  "centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
  "almalinux8" => {box: "almalinux/8", user: "vagrant"},
  "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
  "rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
  "fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
  "fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
  "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
  "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
  "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
  "rhel7" => {box: "generic/rhel7", user: "vagrant"},
  "rhel8" => {box: "generic/rhel8", user: "vagrant"},
}

if File.exist?(CONFIG)
  require CONFIG
end

# Defaults for config options defined in CONFIG
$num_instances ||= 3
$instance_name_prefix ||= "k8s"
$vm_gui ||= false
$vm_memory ||= 2048
$vm_cpus ||= 2
$shared_folders ||= {}
$forwarded_ports ||= {}
$subnet ||= "172.18.8"
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
$os ||= "ubuntu1804"
$network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
$multi_networking ||= "False"
$download_run_once ||= "True"
$download_force_cache ||= "False"
# The first three nodes are etcd servers
$etcd_instances ||= $num_instances
# The first two nodes are kube masters
$kube_master_instances ||= $num_instances == 1 ? $num_instances : ($num_instances - 1)
# All nodes are kube nodes
$kube_node_instances ||= $num_instances
# The following only works when using the libvirt provider
$kube_node_instances_with_disks ||= false
$kube_node_instances_with_disks_size ||= "20G"
$kube_node_instances_with_disks_number ||= 2
$override_disk_size ||= false
$disk_size ||= "20GB"
$local_path_provisioner_enabled ||= "False"
$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/"
$libvirt_nested ||= false
# boolean or string (e.g. "-vvv")
$ansible_verbosity ||= false
$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || ""

$playbook ||= "cluster.yml"

host_vars = {}

$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory = "inventory/sample" if ! $inventory
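
As the `CONFIG` line above shows, the config file location can be overridden through the environment; a small sketch (the file path is hypothetical):

```ShellSession
# Use an alternate Vagrant config file instead of vagrant/config.rb
KUBESPRAY_VAGRANT_CONFIG=vagrant/ci-config.rb vagrant up
```
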
@ -92,16 +74,16 @@ $inventory = File.absolute_path($inventory, File.dirname(__FILE__))
if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
  $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible")
  FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
  $vagrant_inventory = File.join($vagrant_ansible,"inventory")
  FileUtils.rm_f($vagrant_inventory)
  FileUtils.ln_s($inventory, $vagrant_inventory)
end

if Vagrant.has_plugin?("vagrant-proxyconf")
  $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
  (1..$num_instances).each do |i|
    $no_proxy += ",#{$subnet}.#{i+100}"
  end
end
Vagrant.configure("2") do |config|

@ -151,12 +133,9 @@ Vagrant.configure("2") do |config|
      vb.gui = $vm_gui
      vb.linked_clone = true
      vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM
      vb.customize ["modifyvm", :id, "--audio", "none"]
    end

    node.vm.provider :libvirt do |lv|
      lv.nested = $libvirt_nested
      lv.cpu_mode = "host-model"
      lv.memory = $vm_memory
      lv.cpus = $vm_cpus
      lv.default_prefix = 'kubespray'

@ -173,7 +152,7 @@ Vagrant.configure("2") do |config|
      # always make /dev/sd{a/b/c} so that CI can ensure that
      # virtualbox and libvirt will have the same devices to use for OSDs
      (1..$kube_node_instances_with_disks_number).each do |d|
        lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi"
      end
    end
@ -186,55 +165,30 @@ Vagrant.configure("2") do |config|
      node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
    end

    if ["rhel7","rhel8"].include? $os
      # Vagrant synced_folder rsync options cannot be used for RHEL boxes as Rsync package cannot
      # be installed until the host is registered with a valid Red Hat support subscription
      node.vm.synced_folder ".", "/vagrant", disabled: false
      $shared_folders.each do |src, dst|
        node.vm.synced_folder src, dst
      end
    else
      node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
      $shared_folders.each do |src, dst|
        node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
      end
    end

    ip = "#{$subnet}.#{i+100}"
    node.vm.network :private_network, ip: ip,
      :libvirt__guest_ipv6 => 'yes',
      :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
      :libvirt__ipv6_prefix => "64",
      :libvirt__forward_mode => "none",
      :libvirt__dhcp_enabled => false

    # Disable swap for each vm
    node.vm.provision "shell", inline: "swapoff -a"

    # ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
    if ["ubuntu1804", "ubuntu2004"].include? $os
      node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
      node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
    end

    # Disable firewalld on oraclelinux/redhat vms
    if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
      node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
    end

    host_vars[vm_name] = {
      "ip": ip,
      "flannel_interface": "eth1",
      "kube_network_plugin": $network_plugin,
      "kube_network_plugin_multus": $multi_networking,
      "download_run_once": $download_run_once,
      "download_localhost": "False",
      "download_cache_dir": ENV['HOME'] + "/kubespray_cache",
      # Make kubespray cache even when download_run_once is false
      "download_force_cache": $download_force_cache,
      # Keeping the cache on the nodes can improve provisioning speed while debugging kubespray
      "download_keep_remote_cache": "False",
      "docker_rpm_keepcache": "1",
      "docker_keepcache": "1",
      # These two settings will put kubectl and admin.config in $inventory/artifacts
      "kubeconfig_localhost": "True",
      "kubectl_localhost": "True",

@ -244,28 +198,24 @@ Vagrant.configure("2") do |config|
    }

    # Only execute the Ansible provisioner once, when all the machines are up and ready.
    if i == $num_instances
      node.vm.provision "ansible" do |ansible|
        ansible.playbook = $playbook
        ansible.verbose = $ansible_verbosity
        $ansible_inventory_path = File.join( $inventory, "hosts.ini")
        if File.exist?($ansible_inventory_path)
          ansible.inventory_path = $ansible_inventory_path
        end
        ansible.become = true
        ansible.limit = "all,localhost"
        ansible.host_key_checking = false
        ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
        ansible.host_vars = host_vars
        if $ansible_tags != ""
          ansible.tags = [$ansible_tags]
        end
        #ansible.tags = ['download']
        ansible.groups = {
          "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
          "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
          "kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
          "k8s_cluster:children" => ["kube_control_plane", "kube_node"],
        }
      end
    end
_config.yml
-----------

@ -1,2 +1,2 @@
---
theme: jekyll-theme-slate
ansible.cfg
-----------

@ -1,8 +1,9 @@
[ssh_connection]
pipelining=True
ansible_ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
[defaults]
strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
force_valid_group_names = ignore

@ -10,13 +11,11 @@ host_key_checking=False
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp
fact_caching_timeout = 86400
stdout_callback = default
display_skipped_hosts = no
library = ./library
callbacks_enabled = profile_tasks,ara_default
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
deprecation_warnings=False
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg
[inventory]
ignore_patterns = artifacts, credentials
ansible_version.yml
-------------------

@ -1,33 +0,0 @@
---
- hosts: localhost
  gather_facts: false
  become: no
  vars:
    minimal_ansible_version: 2.11.0
    maximal_ansible_version: 2.14.0
    ansible_connection: local
  tags: always
  tasks:
    - name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
      assert:
        msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }} exclusive"
        that:
          - ansible_version.string is version(minimal_ansible_version, ">=")
          - ansible_version.string is version(maximal_ansible_version, "<")
      tags:
        - check

    - name: "Check that python netaddr is installed"
      assert:
        msg: "Python netaddr is not present"
        that: "'127.0.0.1' | ipaddr"
      tags:
        - check

    # CentOS 7 provides too old jinja version
    - name: "Check that jinja is not too old (install via pip)"
      assert:
        msg: "Your Jinja version is too old, install via pip"
        that: "{% set test %}It works{% endset %}{{ test == 'It works' }}"
      tags:
        - check
cluster.yml
-----------

@ -1,131 +1,115 @@
---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- hosts: k8s_cluster:etcd
  strategy: linear
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bootstrap-os, tags: bootstrap-os }

- name: Gather facts
  tags: always
  import_playbook: facts.yml

- hosts: k8s_cluster:etcd
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
    - { role: download, tags: download, when: "not skip_downloads" }

- hosts: etcd:kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: true
        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
      when: etcd_deployment_type != "kubeadm"

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: false
        etcd_events_cluster_setup: false
      when:
        - etcd_deployment_type != "kubeadm"
        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
        - kube_network_plugin != "calico" or calico_datastore == "etcd"

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/node, tags: node }

- hosts: kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/control-plane, tags: master }
    - { role: kubernetes/client, tags: client }
    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/kubeadm, tags: kubeadm }
    - { role: kubernetes/node-label, tags: node-label }
    - { role: network_plugin, tags: network }

- hosts: calico_rr
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }

- hosts: kube_control_plane[0]
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }

- hosts: kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
    - { role: kubernetes-apps/network_plugin, tags: network }
    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
    - { role: kubernetes-apps, tags: apps }

- name: Apply resolv.conf changes now that cluster DNS is up
  hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
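
Since the plays above are tagged, a partial run can target individual stages; a sketch using tags that appear in this playbook:

```ShellSession
# Re-run only the preinstall and etcd stages of cluster.yml
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root \
  --tags preinstall,etcd cluster.yml
```
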
@ -35,7 +35,7 @@ class SearchEC2Tags(object):
    hosts['_meta'] = { 'hostvars': {} }

    ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
    for group in ["kube_control_plane", "kube_node", "etcd"]:
      hosts[group] = []
      tag_key = "kubespray-role"
      tag_value = ["*"+group+"*"]

@ -69,8 +69,8 @@ class SearchEC2Tags(object):
      hosts[group].append(dns_name)
      hosts['_meta']['hostvars'][dns_name] = ansible_host

    hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']}
    print(json.dumps(hosts, sort_keys=True, indent=2))

SearchEC2Tags()
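
A dynamic inventory script like this is passed directly to `-i`; a sketch, assuming the script is saved as `kubespray-aws-inventory.py` (the path is hypothetical):

```ShellSession
# Ansible executes the script and consumes its JSON output as the inventory
ansible-playbook -i contrib/aws_inventory/kubespray-aws-inventory.py --become cluster.yml
```
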
@ -1 +0,0 @@
boto3 # Apache-2.0
@ -15,23 +15,22 @@ Resource Group. It will not install Kubernetes itself, this has to be done in a

## Configuration through group_vars/all

You have to modify at least two variables in group_vars/all. One is the **cluster_name** variable, which must be globally
unique due to some restrictions in Azure. The other is the **ssh_public_keys** variable, which must be your ssh public
key to access your azure virtual machines. Most other variables should be self-explanatory if you have some basic Kubernetes
experience.

## Bastion host

You can enable the use of a Bastion Host by changing **use_bastion** in group_vars/all to **true**. The generated
templates will then include an additional bastion VM which can then be used to connect to the masters and nodes. The option
also removes all public IPs from all other VMs.
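
For illustration, the three settings discussed above might look like this in group_vars/all; the values are placeholders and the exact shape of `ssh_public_keys` may differ:

```ShellSession
# Hypothetical excerpt appended to contrib/azurerm group_vars/all
cat >> group_vars/all <<'EOF'
cluster_name: my-globally-unique-cluster
ssh_public_keys: "ssh-rsa AAAA... user@example"
use_bastion: true
EOF
```
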
## Generating and applying

To generate and apply the templates, call:

```shell
./apply-rg.sh <resource_group_name>
```

If you change something in the configuration (e.g. number of nodes) later, you can call this again and Azure will

@ -42,26 +41,24 @@ take care about creating/modifying whatever is needed.

If you need to delete all resources from a resource group, simply call:

```shell
./clear-rg.sh <resource_group_name>
```

**WARNING** this really deletes everything from your resource group, including everything that was later created by you!

## Installing Ansible and the dependencies

Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)

## Generating an inventory for kubespray

After you have applied the templates, you can generate an inventory with this call:

```shell
./generate-inventory.sh <resource_group_name>
```

It will create the file ./inventory which can then be used with kubespray, e.g.:

```shell
cd kubespray-root-dir
ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
```
@ -9,11 +9,18 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
  exit 1
fi

ansible-playbook generate-templates.yml

az deployment group create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
contrib/azurerm/apply-rg_2.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+set -e
+
+AZURE_RESOURCE_GROUP="$1"
+
+if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
+    echo "AZURE_RESOURCE_GROUP is missing"
+    exit 1
+fi
+
+ansible-playbook generate-templates.yml
+
+az group deployment create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
+az group deployment create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
+az group deployment create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
+az group deployment create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
+az group deployment create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
+az group deployment create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
@@ -9,6 +9,10 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
   exit 1
 fi

-ansible-playbook generate-templates.yml
-
-az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete
+if az &>/dev/null; then
+    echo "azure cli 2.0 found, using it instead of 1.0"
+    ./clear-rg_2.sh "$AZURE_RESOURCE_GROUP"
+else
+    ansible-playbook generate-templates.yml
+    azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
+fi
contrib/azurerm/clear-rg_2.sh (new executable file, 14 lines)
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+set -e
+
+AZURE_RESOURCE_GROUP="$1"
+
+if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
+    echo "AZURE_RESOURCE_GROUP is missing"
+    exit 1
+fi
+
+ansible-playbook generate-templates.yml
+
+az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete
@@ -7,7 +7,7 @@ cluster_name: example
 # node that can be used to access the masters and minions
 use_bastion: false

-# Set this to a preferred name that will be used as the first part of the dns name for your bastotion host. For example: k8s-bastion.<azureregion>.cloudapp.azure.com.
+# Set this to a prefered name that will be used as the first part of the dns name for your bastotion host. For example: k8s-bastion.<azureregion>.cloudapp.azure.com.
 # This is convenient when exceptions have to be configured on a firewall to allow ssh to the given bastion host.
 # bastion_domain_prefix: k8s-bastion
@@ -1,6 +1,6 @@
 ---

-- name: Query Azure VMs # noqa 301
+- name: Query Azure VMs
   command: azure vm list-ip-address --json {{ azure_resource_group }}
   register: vm_list_cmd

@@ -12,4 +12,3 @@
   template:
     src: inventory.j2
     dest: "{{ playbook_dir }}/inventory"
-    mode: 0644
@@ -7,9 +7,9 @@
 {% endif %}
 {% endfor %}

-[kube_control_plane]
+[kube-master]
 {% for vm in vm_list %}
-{% if 'kube_control_plane' in vm.tags.roles %}
+{% if 'kube-master' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}

@@ -21,13 +21,13 @@
 {% endif %}
 {% endfor %}

-[kube_node]
+[kube-node]
 {% for vm in vm_list %}
-{% if 'kube_node' in vm.tags.roles %}
+{% if 'kube-node' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}

-[k8s_cluster:children]
-kube_node
-kube_control_plane
+[k8s-cluster:children]
+kube-node
+kube-master
@@ -1,14 +1,14 @@
 ---

-- name: Query Azure VMs IPs # noqa 301
+- name: Query Azure VMs IPs
   command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
   register: vm_ip_list_cmd

-- name: Query Azure VMs Roles # noqa 301
+- name: Query Azure VMs Roles
   command: az vm list -o json --resource-group {{ azure_resource_group }}
   register: vm_list_cmd

-- name: Query Azure Load Balancer Public IP # noqa 301
+- name: Query Azure Load Balancer Public IP
   command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
   register: lb_pubip_cmd
@@ -22,10 +22,8 @@
   template:
     src: inventory.j2
     dest: "{{ playbook_dir }}/inventory"
-    mode: 0644

 - name: Generate Load Balancer variables
   template:
     src: loadbalancer_vars.j2
     dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
-    mode: 0644
@@ -7,9 +7,9 @@
 {% endif %}
 {% endfor %}

-[kube_control_plane]
+[kube-master]
 {% for vm in vm_roles_list %}
-{% if 'kube_control_plane' in vm.tags.roles %}
+{% if 'kube-master' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}

@@ -21,14 +21,14 @@
 {% endif %}
 {% endfor %}

-[kube_node]
+[kube-node]
 {% for vm in vm_roles_list %}
-{% if 'kube_node' in vm.tags.roles %}
+{% if 'kube-node' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}

-[k8s_cluster:children]
-kube_node
-kube_control_plane
+[k8s-cluster:children]
+kube-node
+kube-master
@@ -8,13 +8,11 @@
     path: "{{ base_dir }}"
     state: directory
     recurse: true
-    mode: 0755

 - name: Store json files in base_dir
   template:
     src: "{{ item }}"
     dest: "{{ base_dir }}/{{ item }}"
-    mode: 0644
   with_items:
     - network.json
     - storage.json
@@ -144,7 +144,7 @@
         "[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
       ],
       "tags": {
-        "roles": "kube_control_plane,etcd"
+        "roles": "kube-master,etcd"
      },
      "apiVersion": "{{apiVersion}}",
      "properties": {
@@ -61,7 +61,7 @@
         "[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
       ],
       "tags": {
-        "roles": "kube_node"
+        "roles": "kube-node"
      },
      "apiVersion": "{{apiVersion}}",
      "properties": {

@@ -112,4 +112,4 @@
     } {% if not loop.last %},{% endif %}
   {% endfor %}
   ]
-}
+}
@@ -6,7 +6,6 @@ to serve as Kubernetes "nodes", which in turn will run
 called DIND (Docker-IN-Docker).

 The playbook has two roles:
-
 - dind-host: creates the "nodes" as containers in localhost, with
   appropriate settings for DIND (privileged, volume mapping for dind
   storage, etc).
@@ -28,7 +27,7 @@ See below for a complete successful run:

 1. Create the node containers

-```shell
+~~~~
 # From the kubespray root dir
 cd contrib/dind
 pip install -r requirements.txt
@@ -37,15 +36,15 @@ ansible-playbook -i hosts dind-cluster.yaml

 # Back to kubespray root
 cd ../..
-```
+~~~~

 NOTE: if the playbook run fails with something like below error
 message, you may need to specifically set `ansible_python_interpreter`,
 see `./hosts` file for an example expanded localhost entry.

-```shell
+~~~
 failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"}
-```
+~~~

 2. Customize kubespray-dind.yaml
@@ -53,33 +52,33 @@ Note that there's coupling between above created node containers
 and `kubespray-dind.yaml` settings, in particular regarding selected `node_distro`
 (as set in `group_vars/all/all.yaml`), and docker settings.

-```shell
+~~~
 $EDITOR contrib/dind/kubespray-dind.yaml
-```
+~~~

 3. Prepare the inventory and run the playbook

-```shell
+~~~
 INVENTORY_DIR=inventory/local-dind
 mkdir -p ${INVENTORY_DIR}
 rm -f ${INVENTORY_DIR}/hosts.ini
 CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh

 ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml
-```
+~~~

 NOTE: You could also test other distros without editing files by
 passing `--extra-vars` as per below commandline,
 replacing `DISTRO` by either `debian`, `ubuntu`, `centos`, `fedora`:

-```shell
+~~~
 cd contrib/dind
 ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO

 cd ../..
 CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
 ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO
-```
+~~~

 ## Resulting deployment
@@ -90,7 +89,7 @@ from the host where you ran kubespray playbooks.

 Running from an Ubuntu Xenial host:

-```shell
+~~~
 $ uname -a
 Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24
 15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux

@@ -150,14 +149,14 @@ kube-system weave-net-xr46t 2/2 Running 0

 $ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check
 {"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null}
-```
+~~~

 ## Using ./run-test-distros.sh

 You can use `./run-test-distros.sh` to run a set of tests via DIND,
 and excerpt from this script, to get an idea:

-```shell
+~~~
 # The SPEC file(s) must have two arrays as e.g.
 # DISTROS=(debian centos)
 # EXTRAS=(
@@ -170,7 +169,7 @@ and excerpt from this script, to get an idea:
 #
 # Each $EXTRAS element will be whitespace split, and passed as --extra-vars
 # to main kubespray ansible-playbook run.
-```
+~~~

 See e.g. `test-some_distros-most_CNIs.env` and
 `test-some_distros-kube_router_combo.env` in particular for a richer
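To make the SPEC format excerpted above concrete, here is a minimal sketch of such an env file; the file name and the chosen extra-vars are illustrative, not one of the shipped specs:

```shell
# my-spec.env -- sourced by run-test-distros.sh via `. ${spec}`
DISTROS=(debian centos)
# Each element is whitespace-split and passed as --extra-vars:
EXTRAS=(
  "kube_network_plugin=calico"
  "kube_network_plugin=weave"
)
```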
@@ -35,7 +35,6 @@
       path-exclude=/usr/share/doc/*
       path-include=/usr/share/doc/*/copyright
     dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
-    mode: 0644
   when:
     - ansible_os_family == 'Debian'
@@ -64,7 +63,6 @@
   copy:
     content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
     dest: "/etc/sudoers.d/{{ distro_user }}"
-    mode: 0640

 - name: Add my pubkey to "{{ distro_user }}" user authorized keys
   authorized_key:
@@ -69,7 +69,7 @@

 # Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
 # handle manually
-- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
+- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
  raw: |
    echo {{ item | hash('sha1') }} > /etc/machine-id.new
    mv -b /etc/machine-id.new /etc/machine-id
@@ -17,7 +17,7 @@ pass_or_fail() {
 test_distro() {
   local distro=${1:?};shift
   local extra="${*:-}"
-  local prefix="${distro[${extra}]}"
+  local prefix="$distro[${extra}]}"
   ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
   pass_or_fail "$prefix: dind-nodes" || return 1
   (cd ../..

@@ -46,7 +46,7 @@ test_distro() {
   pass_or_fail "$prefix: netcheck" || return 1
 }

-NODES=($(egrep ^kube_node hosts))
+NODES=($(egrep ^kube-node hosts))
 NETCHECKER_HOST=localhost

 : ${OUTPUT_DIR:=./out}

@@ -71,15 +71,15 @@ for spec in ${SPECS}; do
   echo "Loading file=${spec} ..."
   . ${spec} || continue
   : ${DISTROS:?} || continue
-  echo "DISTROS:" "${DISTROS[@]}"
+  echo "DISTROS=${DISTROS[@]}"
   echo "EXTRAS->"
   printf " %s\n" "${EXTRAS[@]}"
   let n=1
-  for distro in "${DISTROS[@]}"; do
+  for distro in ${DISTROS[@]}; do
     for extra in "${EXTRAS[@]:-NULL}"; do
       # Magic value to let this for run once:
      [[ ${extra} == NULL ]] && unset extra
-      docker rm -f "${NODES[@]}"
+      docker rm -f ${NODES[@]}
      printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
      {
        info "${distro}[${extra}] START: file_out=${file_out}"
@@ -20,8 +20,6 @@
 # Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
 # Add hosts with different ip and access ip:
 # inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.1.3
-# Add hosts with a specific hostname, ip, and optional access ip:
-# inventory.py first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
 # Delete a host: inventory.py -10.10.1.3
 # Delete a host by id: inventory.py -node1
 #
@@ -41,14 +39,12 @@ from ruamel.yaml import YAML

 import os
 import re
-import subprocess
 import sys

-ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster',
-         'calico_rr']
+ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
+         'calico-rr']
 PROTECTED_NAMES = ROLES
-AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
-                      'load', 'add']
+AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
 _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
                    '0': False, 'no': False, 'false': False, 'off': False}
 yaml = YAML()
@@ -63,16 +59,13 @@ def get_var_as_bool(name, default):


 CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
-# Remove the reference of KUBE_MASTERS after some deprecation cycles.
-KUBE_CONTROL_HOSTS = int(os.environ.get("KUBE_CONTROL_HOSTS",
-                                        os.environ.get("KUBE_MASTERS", 2)))
+KUBE_MASTERS = int(os.environ.get("KUBE_MASTERS_MASTERS", 2))
 # Reconfigures cluster distribution at scale
 SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
-MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200))
+MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))

 DEBUG = get_var_as_bool("DEBUG", True)
 HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
-USE_REAL_HOSTNAME = get_var_as_bool("USE_REAL_HOSTNAME", False)

 # Configurable as shell vars end
@@ -82,54 +75,32 @@ class KubesprayInventory(object):
     def __init__(self, changed_hosts=None, config_file=None):
         self.config_file = config_file
         self.yaml_config = {}
-        loadPreviousConfig = False
-        printHostnames = False
-        # See whether there are any commands to process
-        if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
-            if changed_hosts[0] == "add":
-                loadPreviousConfig = True
-                changed_hosts = changed_hosts[1:]
-            elif changed_hosts[0] == "print_hostnames":
-                loadPreviousConfig = True
-                printHostnames = True
-            else:
-                self.parse_command(changed_hosts[0], changed_hosts[1:])
-                sys.exit(0)
-
-        # If the user wants to remove a node, we need to load the config anyway
-        if changed_hosts and changed_hosts[0][0] == "-":
-            loadPreviousConfig = True
-
-        if self.config_file and loadPreviousConfig:  # Load previous YAML file
+        if self.config_file:
             try:
                 self.hosts_file = open(config_file, 'r')
                 self.yaml_config = yaml.load(self.hosts_file)
-            except OSError as e:
-                # I am assuming we are catching "cannot open file" exceptions
-                print(e)
-                sys.exit(1)
+            except FileNotFoundError:
+                pass

-        if printHostnames:
-            self.print_hostnames()
+        if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
+            self.parse_command(changed_hosts[0], changed_hosts[1:])
             sys.exit(0)

         self.ensure_required_groups(ROLES)

         if changed_hosts:
             changed_hosts = self.range2ips(changed_hosts)
-            self.hosts = self.build_hostnames(changed_hosts,
-                                              loadPreviousConfig)
+            self.hosts = self.build_hostnames(changed_hosts)
             self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
             self.set_all(self.hosts)
             self.set_k8s_cluster()
             etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
             self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
             if len(self.hosts) >= SCALE_THRESHOLD:
-                self.set_kube_control_plane(list(self.hosts.keys())[
-                    etcd_hosts_count:(etcd_hosts_count + KUBE_CONTROL_HOSTS)])
+                self.set_kube_master(list(self.hosts.keys())[
+                    etcd_hosts_count:(etcd_hosts_count + KUBE_MASTERS)])
             else:
-                self.set_kube_control_plane(
-                    list(self.hosts.keys())[:KUBE_CONTROL_HOSTS])
+                self.set_kube_master(list(self.hosts.keys())[:KUBE_MASTERS])
             self.set_kube_node(self.hosts.keys())
             if len(self.hosts) >= SCALE_THRESHOLD:
                 self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])
@@ -179,37 +150,23 @@ class KubesprayInventory(object):
         except IndexError:
             raise ValueError("Host name must end in an integer")

-    # Keeps already specified hosts,
-    # and adds or removes the hosts provided as an argument
-    def build_hostnames(self, changed_hosts, loadPreviousConfig=False):
+    def build_hostnames(self, changed_hosts):
         existing_hosts = OrderedDict()
         highest_host_id = 0
-        # Load already existing hosts from the YAML
-        if loadPreviousConfig:
-            try:
-                for host in self.yaml_config['all']['hosts']:
-                    # Read configuration of an existing host
-                    hostConfig = self.yaml_config['all']['hosts'][host]
-                    existing_hosts[host] = hostConfig
-                    # If the existing host seems
-                    # to have been created automatically, detect its ID
-                    if host.startswith(HOST_PREFIX):
-                        host_id = self.get_host_id(host)
-                        if host_id > highest_host_id:
-                            highest_host_id = host_id
-            except Exception as e:
-                # I am assuming we are catching automatically
-                # created hosts without IDs
-                print(e)
-                sys.exit(1)
+        try:
+            for host in self.yaml_config['all']['hosts']:
+                existing_hosts[host] = self.yaml_config['all']['hosts'][host]
+                host_id = self.get_host_id(host)
+                if host_id > highest_host_id:
+                    highest_host_id = host_id
+        except Exception:
+            pass

         # FIXME(mattymo): Fix condition where delete then add reuses highest id
         next_host_id = highest_host_id + 1
-        next_host = ""

         all_hosts = existing_hosts.copy()
         for host in changed_hosts:
-            # Delete the host from config the hostname/IP has a "-" prefix
             if host[0] == "-":
                 realhost = host[1:]
                 if self.exists_hostname(all_hosts, realhost):
|
|||
elif self.exists_ip(all_hosts, realhost):
|
||||
self.debug("Marked {0} for deletion.".format(realhost))
|
||||
self.delete_host_by_ip(all_hosts, realhost)
|
||||
# Host/Argument starts with a digit,
|
||||
# then we assume its an IP address
|
||||
elif host[0].isdigit():
|
||||
if ',' in host:
|
||||
ip, access_ip = host.split(',')
|
||||
|
@@ -233,40 +188,16 @@ class KubesprayInventory(object):
                     self.debug("Skipping existing host {0}.".format(ip))
                     continue

-                if USE_REAL_HOSTNAME:
-                    cmd = ("ssh -oStrictHostKeyChecking=no "
-                           + access_ip + " 'hostname -s'")
-                    next_host = subprocess.check_output(cmd, shell=True)
-                    next_host = next_host.strip().decode('ascii')
-                else:
-                    # Generates a hostname because we have only an IP address
-                    next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
-                    next_host_id += 1
+                # Uses automatically generated node name
+                # in case we dont provide it.
+                next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
+                next_host_id += 1
                 all_hosts[next_host] = {'ansible_host': access_ip,
                                         'ip': ip,
                                         'access_ip': access_ip}
-            # Host/Argument starts with a letter, then we assume its a hostname
             elif host[0].isalpha():
-                if ',' in host:
-                    try:
-                        hostname, ip, access_ip = host.split(',')
-                    except Exception:
-                        hostname, ip = host.split(',')
-                        access_ip = ip
-                if self.exists_hostname(all_hosts, host):
-                    self.debug("Skipping existing host {0}.".format(host))
-                    continue
-                elif self.exists_ip(all_hosts, ip):
-                    self.debug("Skipping existing host {0}.".format(ip))
-                    continue
-                all_hosts[hostname] = {'ansible_host': access_ip,
-                                       'ip': ip,
-                                       'access_ip': access_ip}
+                raise Exception("Adding hosts by hostname is not supported.")

         return all_hosts

-    # Expand IP ranges into individual addresses
     def range2ips(self, hosts):
         reworked_hosts = []
@@ -275,14 +206,14 @@ class KubesprayInventory(object):
                 # Python 3.x
                 start = int(ip_address(start_address))
                 end = int(ip_address(end_address))
-            except Exception:
+            except:
                 # Python 2.7
-                start = int(ip_address(str(start_address)))
-                end = int(ip_address(str(end_address)))
+                start = int(ip_address(unicode(start_address)))
+                end = int(ip_address(unicode(end_address)))
             return [ip_address(ip).exploded for ip in range(start, end + 1)]

         for host in hosts:
-            if '-' in host and not (host.startswith('-') or host[0].isalpha()):
+            if '-' in host and not host.startswith('-'):
                 start, end = host.strip().split('-')
                 try:
                     reworked_hosts.extend(ips(start, end))
@@ -310,7 +241,7 @@ class KubesprayInventory(object):

     def purge_invalid_hosts(self, hostnames, protected_names=[]):
         for role in self.yaml_config['all']['children']:
-            if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
+            if role != 'k8s-cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
                 all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy()  # noqa
                 for host in all_hosts.keys():
                     if host not in hostnames and host not in protected_names:
@@ -331,54 +262,52 @@ class KubesprayInventory(object):
             if self.yaml_config['all']['hosts'] is None:
                 self.yaml_config['all']['hosts'] = {host: None}
             self.yaml_config['all']['hosts'][host] = opts
-        elif group != 'k8s_cluster:children':
+        elif group != 'k8s-cluster:children':
             if self.yaml_config['all']['children'][group]['hosts'] is None:
                 self.yaml_config['all']['children'][group]['hosts'] = {
                     host: None}
             else:
                 self.yaml_config['all']['children'][group]['hosts'][host] = None  # noqa

-    def set_kube_control_plane(self, hosts):
+    def set_kube_master(self, hosts):
         for host in hosts:
-            self.add_host_to_group('kube_control_plane', host)
+            self.add_host_to_group('kube-master', host)

     def set_all(self, hosts):
         for host, opts in hosts.items():
             self.add_host_to_group('all', host, opts)

     def set_k8s_cluster(self):
-        k8s_cluster = {'children': {'kube_control_plane': None,
-                                    'kube_node': None}}
-        self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster
+        k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
+        self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster

     def set_calico_rr(self, hosts):
         for host in hosts:
-            if host in self.yaml_config['all']['children']['kube_control_plane']:  # noqa
-                self.debug("Not adding {0} to calico_rr group because it "
-                           "conflicts with kube_control_plane "
-                           "group".format(host))
+            if host in self.yaml_config['all']['children']['kube-master']:
+                self.debug("Not adding {0} to calico-rr group because it "
+                           "conflicts with kube-master group".format(host))
                 continue
-            if host in self.yaml_config['all']['children']['kube_node']:
-                self.debug("Not adding {0} to calico_rr group because it "
-                           "conflicts with kube_node group".format(host))
+            if host in self.yaml_config['all']['children']['kube-node']:
+                self.debug("Not adding {0} to calico-rr group because it "
+                           "conflicts with kube-node group".format(host))
                 continue
-            self.add_host_to_group('calico_rr', host)
+            self.add_host_to_group('calico-rr', host)

     def set_kube_node(self, hosts):
         for host in hosts:
             if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
                 if host in self.yaml_config['all']['children']['etcd']['hosts']:  # noqa
-                    self.debug("Not adding {0} to kube_node group because of "
+                    self.debug("Not adding {0} to kube-node group because of "
                                "scale deployment and host is in etcd "
                                "group.".format(host))
                     continue
             if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD:  # noqa
-                if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']:  # noqa
-                    self.debug("Not adding {0} to kube_node group because of "
-                               "scale deployment and host is in "
-                               "kube_control_plane group.".format(host))
+                if host in self.yaml_config['all']['children']['kube-master']['hosts']:  # noqa
+                    self.debug("Not adding {0} to kube-node group because of "
+                               "scale deployment and host is in kube-master "
+                               "group.".format(host))
                     continue
-            self.add_host_to_group('kube_node', host)
+            self.add_host_to_group('kube-node', host)

     def set_etcd(self, hosts):
         for host in hosts:
@@ -419,8 +348,6 @@ class KubesprayInventory(object):
             self.print_config()
         elif command == 'print_ips':
             self.print_ips()
-        elif command == 'print_hostnames':
-            self.print_hostnames()
         elif command == 'load':
             self.load_file(args)
         else:
@@ -434,15 +361,11 @@ Available commands:
 help - Display this message
 print_cfg - Write inventory file to stdout
 print_ips - Write a space-delimited list of IPs from "all" group
-print_hostnames - Write a space-delimited list of Hostnames from "all" group
-add - Adds specified hosts into an already existing inventory

 Advanced usage:
-Create new or overwrite old inventory file: inventory.py 10.10.1.5
-Add another host after initial creation: inventory.py add 10.10.1.6
+Add another host after initial creation: inventory.py 10.10.1.5
 Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
 Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3
-Add hosts with a specific hostname, ip, and optional access ip: first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
 Delete a host: inventory.py -10.10.1.3
 Delete a host by id: inventory.py -node1
@@ -450,18 +373,14 @@ Configurable env vars:
 DEBUG Enable debug printing. Default: True
 CONFIG_FILE File to write config to Default: ./inventory/sample/hosts.yaml
 HOST_PREFIX Host prefix for generated hosts. Default: node
-KUBE_CONTROL_HOSTS Set the number of kube-control-planes. Default: 2
 SCALE_THRESHOLD Separate ETCD role if # of nodes >= 50
-MASSIVE_SCALE_THRESHOLD Separate K8s control-plane and ETCD if # of nodes >= 200
+MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
 '''  # noqa
         print(help_text)

     def print_config(self):
         yaml.dump(self.yaml_config, sys.stdout)

-    def print_hostnames(self):
-        print(' '.join(self.yaml_config['all']['hosts'].keys()))
-
     def print_ips(self):
         ips = []
         for host, opts in self.yaml_config['all']['hosts'].items():
@@ -473,7 +392,6 @@ def main(argv=None):
     if not argv:
         argv = sys.argv[1:]
     KubesprayInventory(argv, CONFIG_FILE)
-    return 0


 if __name__ == "__main__":
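For orientation, a short usage sketch of the release-2.x builder, following the help text above (the inventory path and IPs are illustrative):

```shell
cd contrib/inventory_builder
# Create (or overwrite) an inventory for three hosts; with defaults the
# first three become etcd and the first two kube-master:
CONFIG_FILE=../../inventory/mycluster/hosts.yaml python3 inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
# Inspect the result:
CONFIG_FILE=../../inventory/mycluster/hosts.yaml python3 inventory.py print_cfg
CONFIG_FILE=../../inventory/mycluster/hosts.yaml python3 inventory.py print_ips
```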
@@ -12,10 +12,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import inventory
-from io import StringIO
+import mock
 import unittest
-from unittest import mock

 from collections import OrderedDict
 import sys
@@ -24,29 +22,7 @@ path = "./contrib/inventory_builder/"
 if path not in sys.path:
     sys.path.append(path)

-import inventory  # noqa
-
-
-class TestInventoryPrintHostnames(unittest.TestCase):
-
-    @mock.patch('ruamel.yaml.YAML.load')
-    def test_print_hostnames(self, load_mock):
-        mock_io = mock.mock_open(read_data='')
-        load_mock.return_value = OrderedDict({'all': {'hosts': {
-            'node1': {'ansible_host': '10.90.0.2',
-                      'ip': '10.90.0.2',
-                      'access_ip': '10.90.0.2'},
-            'node2': {'ansible_host': '10.90.0.3',
-                      'ip': '10.90.0.3',
-                      'access_ip': '10.90.0.3'}}}})
-        with mock.patch('builtins.open', mock_io):
-            with self.assertRaises(SystemExit) as cm:
-                with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
-                    inventory.KubesprayInventory(
-                        changed_hosts=["print_hostnames"],
-                        config_file="file")
-                self.assertEqual("node1 node2\n", stdout.getvalue())
-        self.assertEqual(cm.exception.code, 0)
+import inventory


 class TestInventory(unittest.TestCase):
@@ -67,14 +43,14 @@ class TestInventory(unittest.TestCase):

     def test_get_ip_from_opts_invalid(self):
         optstring = "notanaddr=value something random!chars:D"
-        self.assertRaisesRegex(ValueError, "IP parameter not found",
-                               self.inv.get_ip_from_opts, optstring)
+        self.assertRaisesRegexp(ValueError, "IP parameter not found",
+                                self.inv.get_ip_from_opts, optstring)

     def test_ensure_required_groups(self):
         groups = ['group1', 'group2']
         self.inv.ensure_required_groups(groups)
         for group in groups:
-            self.assertIn(group, self.inv.yaml_config['all']['children'])
+            self.assertTrue(group in self.inv.yaml_config['all']['children'])

     def test_get_host_id(self):
         hostnames = ['node99', 'no99de01', '01node01', 'node1.domain',
@@ -87,17 +63,26 @@ class TestInventory(unittest.TestCase):
     def test_get_host_id_invalid(self):
         bad_hostnames = ['node', 'no99de', '01node', 'node.111111']
         for hostname in bad_hostnames:
-            self.assertRaisesRegex(ValueError, "Host name must end in an",
-                                   self.inv.get_host_id, hostname)
+            self.assertRaisesRegexp(ValueError, "Host name must end in an",
+                                    self.inv.get_host_id, hostname)
+
+    def test_build_hostnames_add_one(self):
+        changed_hosts = ['10.90.0.2']
+        expected = OrderedDict([('node1',
+                                 {'ansible_host': '10.90.0.2',
+                                  'ip': '10.90.0.2',
+                                  'access_ip': '10.90.0.2'})])
+        result = self.inv.build_hostnames(changed_hosts)
+        self.assertEqual(expected, result)

     def test_build_hostnames_add_duplicate(self):
         changed_hosts = ['10.90.0.2']
-        expected = OrderedDict([('node3',
+        expected = OrderedDict([('node1',
                                  {'ansible_host': '10.90.0.2',
                                   'ip': '10.90.0.2',
                                   'access_ip': '10.90.0.2'})])
         self.inv.yaml_config['all']['hosts'] = expected
-        result = self.inv.build_hostnames(changed_hosts, True)
+        result = self.inv.build_hostnames(changed_hosts)
         self.assertEqual(expected, result)

     def test_build_hostnames_add_two(self):
@@ -113,30 +98,6 @@ class TestInventory(unittest.TestCase):
         result = self.inv.build_hostnames(changed_hosts)
         self.assertEqual(expected, result)

-    def test_build_hostnames_add_three(self):
-        changed_hosts = ['10.90.0.2', '10.90.0.3', '10.90.0.4']
-        expected = OrderedDict([
-            ('node1', {'ansible_host': '10.90.0.2',
-                       'ip': '10.90.0.2',
-                       'access_ip': '10.90.0.2'}),
-            ('node2', {'ansible_host': '10.90.0.3',
-                       'ip': '10.90.0.3',
-                       'access_ip': '10.90.0.3'}),
-            ('node3', {'ansible_host': '10.90.0.4',
-                       'ip': '10.90.0.4',
-                       'access_ip': '10.90.0.4'})])
-        result = self.inv.build_hostnames(changed_hosts)
-        self.assertEqual(expected, result)
-
-    def test_build_hostnames_add_one(self):
-        changed_hosts = ['10.90.0.2']
-        expected = OrderedDict([('node1',
-                                 {'ansible_host': '10.90.0.2',
-                                  'ip': '10.90.0.2',
-                                  'access_ip': '10.90.0.2'})])
-        result = self.inv.build_hostnames(changed_hosts)
-        self.assertEqual(expected, result)
-
     def test_build_hostnames_delete_first(self):
         changed_hosts = ['-10.90.0.2']
         existing_hosts = OrderedDict([
@@ -151,24 +112,7 @@ class TestInventory(unittest.TestCase):
             ('node2', {'ansible_host': '10.90.0.3',
                        'ip': '10.90.0.3',
                        'access_ip': '10.90.0.3'})])
-        result = self.inv.build_hostnames(changed_hosts, True)
-        self.assertEqual(expected, result)
-
-    def test_build_hostnames_delete_by_hostname(self):
-        changed_hosts = ['-node1']
-        existing_hosts = OrderedDict([
-            ('node1', {'ansible_host': '10.90.0.2',
-                       'ip': '10.90.0.2',
-                       'access_ip': '10.90.0.2'}),
-            ('node2', {'ansible_host': '10.90.0.3',
-                       'ip': '10.90.0.3',
-                       'access_ip': '10.90.0.3'})])
-        self.inv.yaml_config['all']['hosts'] = existing_hosts
-        expected = OrderedDict([
-            ('node2', {'ansible_host': '10.90.0.3',
-                       'ip': '10.90.0.3',
-                       'access_ip': '10.90.0.3'})])
-        result = self.inv.build_hostnames(changed_hosts, True)
+        result = self.inv.build_hostnames(changed_hosts)
         self.assertEqual(expected, result)

     def test_exists_hostname_positive(self):
@@ -248,8 +192,8 @@ class TestInventory(unittest.TestCase):
             ('node2', {'ansible_host': '10.90.0.3',
                        'ip': '10.90.0.3',
                        'access_ip': '10.90.0.3'})])
-        self.assertRaisesRegex(ValueError, "Unable to find host",
-                               self.inv.delete_host_by_ip, existing_hosts, ip)
+        self.assertRaisesRegexp(ValueError, "Unable to find host",
+                                self.inv.delete_host_by_ip, existing_hosts, ip)

     def test_purge_invalid_hosts(self):
         proper_hostnames = ['node1', 'node2']
@@ -264,8 +208,8 @@ class TestInventory(unittest.TestCase):
             ('doesnotbelong2', {'whateveropts=ilike'})])
         self.inv.yaml_config['all']['hosts'] = existing_hosts
         self.inv.purge_invalid_hosts(proper_hostnames)
-        self.assertNotIn(
-            bad_host, self.inv.yaml_config['all']['hosts'].keys())
+        self.assertTrue(
+            bad_host not in self.inv.yaml_config['all']['hosts'].keys())

     def test_add_host_to_group(self):
         group = 'etcd'
@@ -277,13 +221,13 @@ class TestInventory(unittest.TestCase):
             self.inv.yaml_config['all']['children'][group]['hosts'].get(host),
             None)

-    def test_set_kube_control_plane(self):
-        group = 'kube_control_plane'
+    def test_set_kube_master(self):
+        group = 'kube-master'
         host = 'node1'

-        self.inv.set_kube_control_plane([host])
-        self.assertIn(
-            host, self.inv.yaml_config['all']['children'][group]['hosts'])
+        self.inv.set_kube_master([host])
+        self.assertTrue(
+            host in self.inv.yaml_config['all']['children'][group]['hosts'])

     def test_set_all(self):
         hosts = OrderedDict([
@@ -296,30 +240,30 @@ class TestInventory(unittest.TestCase):
             self.inv.yaml_config['all']['hosts'].get(host), opt)

     def test_set_k8s_cluster(self):
-        group = 'k8s_cluster'
-        expected_hosts = ['kube_node', 'kube_control_plane']
+        group = 'k8s-cluster'
+        expected_hosts = ['kube-node', 'kube-master']

         self.inv.set_k8s_cluster()
         for host in expected_hosts:
-            self.assertIn(
-                host,
+            self.assertTrue(
+                host in
                 self.inv.yaml_config['all']['children'][group]['children'])

     def test_set_kube_node(self):
-        group = 'kube_node'
+        group = 'kube-node'
         host = 'node1'

         self.inv.set_kube_node([host])
-        self.assertIn(
-            host, self.inv.yaml_config['all']['children'][group]['hosts'])
+        self.assertTrue(
+            host in self.inv.yaml_config['all']['children'][group]['hosts'])

     def test_set_etcd(self):
         group = 'etcd'
         host = 'node1'

         self.inv.set_etcd([host])
-        self.assertIn(
-            host, self.inv.yaml_config['all']['children'][group]['hosts'])
+        self.assertTrue(
+            host in self.inv.yaml_config['all']['children'][group]['hosts'])

     def test_scale_scenario_one(self):
         num_nodes = 50
@@ -330,12 +274,12 @@ class TestInventory(unittest.TestCase):

         self.inv.set_all(hosts)
         self.inv.set_etcd(list(hosts.keys())[0:3])
-        self.inv.set_kube_control_plane(list(hosts.keys())[0:2])
+        self.inv.set_kube_master(list(hosts.keys())[0:2])
         self.inv.set_kube_node(hosts.keys())
         for h in range(3):
             self.assertFalse(
                 list(hosts.keys())[h] in
-                self.inv.yaml_config['all']['children']['kube_node']['hosts'])
+                self.inv.yaml_config['all']['children']['kube-node']['hosts'])

     def test_scale_scenario_two(self):
         num_nodes = 500
@@ -346,12 +290,12 @@ class TestInventory(unittest.TestCase):

         self.inv.set_all(hosts)
         self.inv.set_etcd(list(hosts.keys())[0:3])
-        self.inv.set_kube_control_plane(list(hosts.keys())[3:5])
+        self.inv.set_kube_master(list(hosts.keys())[3:5])
         self.inv.set_kube_node(hosts.keys())
         for h in range(5):
             self.assertFalse(
                 list(hosts.keys())[h] in
-                self.inv.yaml_config['all']['children']['kube_node']['hosts'])
+                self.inv.yaml_config['all']['children']['kube-node']['hosts'])

     def test_range2ips_range(self):
         changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
@@ -365,10 +309,10 @@ class TestInventory(unittest.TestCase):

     def test_range2ips_incorrect_range(self):
         host_range = ['10.90.0.4-a.9b.c.e']
-        self.assertRaisesRegex(Exception, "Range of ip_addresses isn't valid",
-                               self.inv.range2ips, host_range)
+        self.assertRaisesRegexp(Exception, "Range of ip_addresses isn't valid",
+                                self.inv.range2ips, host_range)

-    def test_build_hostnames_create_with_one_different_ips(self):
+    def test_build_hostnames_different_ips_add_one(self):
         changed_hosts = ['10.90.0.2,192.168.0.2']
         expected = OrderedDict([('node1',
                                  {'ansible_host': '192.168.0.2',
|
|||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_create_with_two_different_ips(self):
|
||||
def test_build_hostnames_different_ips_add_duplicate(self):
|
||||
changed_hosts = ['10.90.0.2,192.168.0.2']
|
||||
expected = OrderedDict([('node1',
|
||||
{'ansible_host': '192.168.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '192.168.0.2'})])
|
||||
self.inv.yaml_config['all']['hosts'] = expected
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_different_ips_add_two(self):
|
||||
changed_hosts = ['10.90.0.2,192.168.0.2', '10.90.0.3,192.168.0.3']
|
||||
expected = OrderedDict([
|
||||
('node1', {'ansible_host': '192.168.0.2',
|
||||
|
@@ -386,210 +340,6 @@ class TestInventory(unittest.TestCase):
             ('node2', {'ansible_host': '192.168.0.3',
                        'ip': '10.90.0.3',
                        'access_ip': '192.168.0.3'})])
-        self.inv.yaml_config['all']['hosts'] = OrderedDict()
         result = self.inv.build_hostnames(changed_hosts)
         self.assertEqual(expected, result)

-    def test_build_hostnames_create_with_three_different_ips(self):
-        changed_hosts = ['10.90.0.2,192.168.0.2',
-                         '10.90.0.3,192.168.0.3',
-                         '10.90.0.4,192.168.0.4']
-        expected = OrderedDict([
-            ('node1', {'ansible_host': '192.168.0.2',
-                       'ip': '10.90.0.2',
-                       'access_ip': '192.168.0.2'}),
-            ('node2', {'ansible_host': '192.168.0.3',
-                       'ip': '10.90.0.3',
-                       'access_ip': '192.168.0.3'}),
-            ('node3', {'ansible_host': '192.168.0.4',
-                       'ip': '10.90.0.4',
-                       'access_ip': '192.168.0.4'})])
-        result = self.inv.build_hostnames(changed_hosts)
-        self.assertEqual(expected, result)
-
-    def test_build_hostnames_overwrite_one_with_different_ips(self):
-        changed_hosts = ['10.90.0.2,192.168.0.2']
-        expected = OrderedDict([('node1',
-                                 {'ansible_host': '192.168.0.2',
-                                  'ip': '10.90.0.2',
-                                  'access_ip': '192.168.0.2'})])
-        existing = OrderedDict([('node5',
-                                 {'ansible_host': '192.168.0.5',
-                                  'ip': '10.90.0.5',
-                                  'access_ip': '192.168.0.5'})])
-        self.inv.yaml_config['all']['hosts'] = existing
-        result = self.inv.build_hostnames(changed_hosts)
-        self.assertEqual(expected, result)
-
-    def test_build_hostnames_overwrite_three_with_different_ips(self):
-        changed_hosts = ['10.90.0.2,192.168.0.2']
-        expected = OrderedDict([('node1',
-                                 {'ansible_host': '192.168.0.2',
-                                  'ip': '10.90.0.2',
-                                  'access_ip': '192.168.0.2'})])
-        existing = OrderedDict([
-            ('node3', {'ansible_host': '192.168.0.3',
-                       'ip': '10.90.0.3',
-                       'access_ip': '192.168.0.3'}),
-            ('node4', {'ansible_host': '192.168.0.4',
-                       'ip': '10.90.0.4',
-                       'access_ip': '192.168.0.4'}),
-            ('node5', {'ansible_host': '192.168.0.5',
-                       'ip': '10.90.0.5',
-                       'access_ip': '192.168.0.5'})])
-        self.inv.yaml_config['all']['hosts'] = existing
-        result = self.inv.build_hostnames(changed_hosts)
-        self.assertEqual(expected, result)
-
-    def test_build_hostnames_different_ips_add_duplicate(self):
-        changed_hosts = ['10.90.0.2,192.168.0.2']
-        expected = OrderedDict([('node3',
-                                 {'ansible_host': '192.168.0.2',
-                                  'ip': '10.90.0.2',
-                                  'access_ip': '192.168.0.2'})])
-        existing = expected
-        self.inv.yaml_config['all']['hosts'] = existing
-        result = self.inv.build_hostnames(changed_hosts, True)
-        self.assertEqual(expected, result)
-
-    def test_build_hostnames_add_two_different_ips_into_one_existing(self):
-        changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
-        expected = OrderedDict([
-            ('node2', {'ansible_host': '192.168.0.2',
-                       'ip': '10.90.0.2',
-                       'access_ip': '192.168.0.2'}),
-            ('node3', {'ansible_host': '192.168.0.3',
-                       'ip': '10.90.0.3',
-                       'access_ip': '192.168.0.3'}),
-            ('node4', {'ansible_host': '192.168.0.4',
-                       'ip': '10.90.0.4',
-                       'access_ip': '192.168.0.4'})])
-
-        existing = OrderedDict([
-            ('node2', {'ansible_host': '192.168.0.2',
-                       'ip': '10.90.0.2',
-                       'access_ip': '192.168.0.2'})])
-        self.inv.yaml_config['all']['hosts'] = existing
-        result = self.inv.build_hostnames(changed_hosts, True)
-        self.assertEqual(expected, result)
-
-    def test_build_hostnames_add_two_different_ips_into_two_existing(self):
-        changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
-        expected = OrderedDict([
-            ('node2', {'ansible_host': '192.168.0.2',
-                       'ip': '10.90.0.2',
-                       'access_ip': '192.168.0.2'}),
-            ('node3', {'ansible_host': '192.168.0.3',
-                       'ip': '10.90.0.3',
-                       'access_ip': '192.168.0.3'}),
-            ('node4', {'ansible_host': '192.168.0.4',
-                       'ip': '10.90.0.4',
-                       'access_ip': '192.168.0.4'}),
-            ('node5', {'ansible_host': '192.168.0.5',
-                       'ip': '10.90.0.5',
-                       'access_ip': '192.168.0.5'})])
-
-        existing = OrderedDict([
-            ('node2', {'ansible_host': '192.168.0.2',
-                       'ip': '10.90.0.2',
-                       'access_ip': '192.168.0.2'}),
-            ('node3', {'ansible_host': '192.168.0.3',
-                       'ip': '10.90.0.3',
-                       'access_ip': '192.168.0.3'})])
-        self.inv.yaml_config['all']['hosts'] = existing
-        result = self.inv.build_hostnames(changed_hosts, True)
-        self.assertEqual(expected, result)
-
-    def test_build_hostnames_add_two_different_ips_into_three_existing(self):
-        changed_hosts = ['10.90.0.5,192.168.0.5', '10.90.0.6,192.168.0.6']
-        expected = OrderedDict([
-            ('node2', {'ansible_host': '192.168.0.2',
-                       'ip': '10.90.0.2',
-                       'access_ip': '192.168.0.2'}),
-            ('node3', {'ansible_host': '192.168.0.3',
-                       'ip': '10.90.0.3',
-                       'access_ip': '192.168.0.3'}),
-            ('node4', {'ansible_host': '192.168.0.4',
-                       'ip': '10.90.0.4',
-                       'access_ip': '192.168.0.4'}),
-            ('node5', {'ansible_host': '192.168.0.5',
-                       'ip': '10.90.0.5',
-                       'access_ip': '192.168.0.5'}),
-            ('node6', {'ansible_host': '192.168.0.6',
-                       'ip': '10.90.0.6',
-                       'access_ip': '192.168.0.6'})])
-
-        existing = OrderedDict([
-            ('node2', {'ansible_host': '192.168.0.2',
-                       'ip': '10.90.0.2',
-                       'access_ip': '192.168.0.2'}),
-            ('node3', {'ansible_host': '192.168.0.3',
-                       'ip': '10.90.0.3',
-                       'access_ip': '192.168.0.3'}),
-            ('node4', {'ansible_host': '192.168.0.4',
-                       'ip': '10.90.0.4',
-                       'access_ip': '192.168.0.4'})])
-        self.inv.yaml_config['all']['hosts'] = existing
-        result = self.inv.build_hostnames(changed_hosts, True)
-        self.assertEqual(expected, result)
-
-    # Add two IP addresses into a config that has
-    # three already defined IP addresses. One of the IP addresses
-    # is a duplicate.
-    def test_build_hostnames_add_two_duplicate_one_overlap(self):
-        changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
-        expected = OrderedDict([
-            ('node2', {'ansible_host': '192.168.0.2',
-                       'ip': '10.90.0.2',
-                       'access_ip': '192.168.0.2'}),
-            ('node3', {'ansible_host': '192.168.0.3',
-                       'ip': '10.90.0.3',
-                       'access_ip': '192.168.0.3'}),
-            ('node4', {'ansible_host': '192.168.0.4',
-                       'ip': '10.90.0.4',
-                       'access_ip': '192.168.0.4'}),
-            ('node5', {'ansible_host': '192.168.0.5',
-                       'ip': '10.90.0.5',
-                       'access_ip': '192.168.0.5'})])
-
-        existing = OrderedDict([
-            ('node2', {'ansible_host': '192.168.0.2',
-                       'ip': '10.90.0.2',
-                       'access_ip': '192.168.0.2'}),
-            ('node3', {'ansible_host': '192.168.0.3',
-                       'ip': '10.90.0.3',
-                       'access_ip': '192.168.0.3'}),
-            ('node4', {'ansible_host': '192.168.0.4',
-                       'ip': '10.90.0.4',
-                       'access_ip': '192.168.0.4'})])
-        self.inv.yaml_config['all']['hosts'] = existing
-        result = self.inv.build_hostnames(changed_hosts, True)
-        self.assertEqual(expected, result)
-
-    # Add two duplicate IP addresses into a config that has
-    # three already defined IP addresses
-    def test_build_hostnames_add_two_duplicate_two_overlap(self):
-        changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
-        expected = OrderedDict([
-            ('node2', {'ansible_host': '192.168.0.2',
-                       'ip': '10.90.0.2',
-                       'access_ip': '192.168.0.2'}),
-            ('node3', {'ansible_host': '192.168.0.3',
-                       'ip': '10.90.0.3',
-                       'access_ip': '192.168.0.3'}),
-            ('node4', {'ansible_host': '192.168.0.4',
-                       'ip': '10.90.0.4',
-                       'access_ip': '192.168.0.4'})])
-
-        existing = OrderedDict([
-            ('node2', {'ansible_host': '192.168.0.2',
-                       'ip': '10.90.0.2',
-                       'access_ip': '192.168.0.2'}),
-            ('node3', {'ansible_host': '192.168.0.3',
-                       'ip': '10.90.0.3',
-                       'access_ip': '192.168.0.3'}),
-            ('node4', {'ansible_host': '192.168.0.4',
-                       'ip': '10.90.0.4',
-                       'access_ip': '192.168.0.4'})])
-        self.inv.yaml_config['all']['hosts'] = existing
-        result = self.inv.build_hostnames(changed_hosts, True)
-        self.assertEqual(expected, result)
@@ -1,7 +1,7 @@
 [tox]
 minversion = 1.6
 skipsdist = True
-envlist = pep8, py33
+envlist = pep8, py27

 [testenv]
 whitelist_externals = py.test
@@ -5,7 +5,7 @@ deployment on VMs.

 This playbook does not create Virtual Machines, nor does it run Kubespray itself.

-## User creation
+### User creation

 If you want to create a user for running Kubespray deployment, you should specify
 both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
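A minimal sketch of what those two variables look like in practice; the user name, key path, and inventory path are illustrative:

```shell
# Append the deployment-user settings to the cluster's group_vars:
cat >> inventory/mycluster/group_vars/all.yml <<'EOF'
k8s_deployment_user: kubespray
k8s_deployment_user_pkey_path: /tmp/ssh_key
EOF
```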
@@ -1,7 +1,7 @@
 ---

 - name: Install required packages
-  package:
+  yum:
     name: "{{ item }}"
     state: present
   with_items:
@@ -28,7 +28,7 @@
   sysctl:
     name: net.ipv4.ip_forward
     value: 1
-    sysctl_file: "{{ sysctl_file_path }}"
+    sysctl_file: /etc/sysctl.d/ipv4-ip_forward.conf
    state: present
    reload: yes

@@ -37,7 +37,7 @@
     name: "{{ item }}"
     state: present
     value: 0
-    sysctl_file: "{{ sysctl_file_path }}"
+    sysctl_file: /etc/sysctl.d/bridge-nf-call.conf
     reload: yes
   with_items:
     - net.bridge.bridge-nf-call-arptables
@@ -11,7 +11,6 @@
     state: directory
     owner: "{{ k8s_deployment_user }}"
     group: "{{ k8s_deployment_user }}"
-    mode: 0700

 - name: Configure sudo for deployment user
   copy:
contrib/metallb/README.md (new file, 12 lines)
@@ -0,0 +1,12 @@
+# Deploy MetalLB into Kubespray/Kubernetes
+```
+MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation. In short, it allows you to create Kubernetes services of type “LoadBalancer” in clusters that don’t run on a cloud provider, and thus cannot simply hook into paid products to provide load-balancers.
+```
+This playbook aims to automate [this](https://metallb.universe.tf/concepts/layer2/). It deploys MetalLB into kubernetes and sets up a layer 2 loadbalancer.
+
+## Install
+```
+Defaults can be found in contrib/metallb/roles/provision/defaults/main.yml. You can override the defaults by copying the contents of this file to somewhere in inventory/mycluster/group_vars such as inventory/mycluster/groups_vars/k8s-cluster/addons.yml and making any adjustments as required.
+
+ansible-playbook --ask-become -i inventory/sample/hosts.ini contrib/metallb/metallb.yml
+```
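A concrete sketch of that override step; the inventory path and address range are illustrative, while the remaining keys mirror the shipped defaults listed further below:

```shell
mkdir -p inventory/mycluster/group_vars/k8s-cluster
# Start from the shipped defaults, then point the pool at your own network:
cat > inventory/mycluster/group_vars/k8s-cluster/addons.yml <<'EOF'
metallb:
  ip_range: "192.168.1.240-192.168.1.250"
  protocol: "layer2"
  limits:
    cpu: "100m"
    memory: "100Mi"
  port: "7472"
  version: v0.7.3
EOF
ansible-playbook --ask-become -i inventory/mycluster/hosts.ini contrib/metallb/metallb.yml
```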
contrib/metallb/library (new symbolic link)
@@ -0,0 +1 @@
+../../library
contrib/metallb/metallb.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
+---
+- hosts: kube-master[0]
+  tags:
+    - "provision"
+  roles:
+    - { role: provision }
contrib/metallb/roles/provision/defaults/main.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
+---
+metallb:
+  ip_range: "10.5.0.50-10.5.0.99"
+  protocol: "layer2"
+  # additional_address_pools:
+  #   kube_service_pool:
+  #     ip_range: "10.5.1.50-10.5.1.99"
+  #     protocol: "layer2"
+  #     auto_assign: false
+  limits:
+    cpu: "100m"
+    memory: "100Mi"
+  port: "7472"
+  version: v0.7.3
contrib/metallb/roles/provision/tasks/main.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
+---
+- name: "Kubernetes Apps | Lay Down MetalLB"
+  become: true
+  template: { src: "{{ item }}.j2", dest: "{{ kube_config_dir }}/{{ item }}" }
+  with_items: ["metallb.yml", "metallb-config.yml"]
+  register: "rendering"
+  when:
+    - "inventory_hostname == groups['kube-master'][0]"
+- name: "Kubernetes Apps | Install and configure MetalLB"
+  kube:
+    name: "MetalLB"
+    kubectl: "{{ bin_dir }}/kubectl"
+    filename: "{{ kube_config_dir }}/{{ item.item }}"
+    state: "{{ item.changed | ternary('latest','present') }}"
+  become: true
+  with_items: "{{ rendering.results }}"
+  when:
+    - "inventory_hostname == groups['kube-master'][0]"
contrib/metallb/roles/provision/templates/metallb-config.yml.j2 (new file, 21 lines)
@@ -0,0 +1,21 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  namespace: metallb-system
+  name: config
+data:
+  config: |
+    address-pools:
+      - name: loadbalanced
+        protocol: {{ metallb.protocol }}
+        addresses:
+          - {{ metallb.ip_range }}
+{% if metallb.additional_address_pools is defined %}{% for pool in metallb.additional_address_pools %}
+      - name: {{ pool }}
+        protocol: {{ metallb.additional_address_pools[pool].protocol }}
+        addresses:
+          - {{ metallb.additional_address_pools[pool].ip_range }}
+        auto-assign: {{ metallb.additional_address_pools[pool].auto_assign }}
+{% endfor %}
+{% endif %}
contrib/metallb/roles/provision/templates/metallb.yml.j2 (new file)
@@ -0,0 +1,221 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: metallb-system
+  labels:
+    app: metallb
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: metallb-system
+  name: controller
+  labels:
+    app: metallb
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: metallb-system
+  name: speaker
+  labels:
+    app: metallb
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: metallb-system:controller
+  labels:
+    app: metallb
+rules:
+- apiGroups: [""]
+  resources: ["services"]
+  verbs: ["get", "list", "watch", "update"]
+- apiGroups: [""]
+  resources: ["services/status"]
+  verbs: ["update"]
+- apiGroups: [""]
+  resources: ["events"]
+  verbs: ["create", "patch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: metallb-system:speaker
+  labels:
+    app: metallb
+rules:
+- apiGroups: [""]
+  resources: ["services", "endpoints", "nodes"]
+  verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  namespace: metallb-system
+  name: config-watcher
+  labels:
+    app: metallb
+rules:
+- apiGroups: [""]
+  resources: ["configmaps"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+  resources: ["events"]
+  verbs: ["create"]
+---
+
+## Role bindings
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: metallb-system:controller
+  labels:
+    app: metallb
+subjects:
+- kind: ServiceAccount
+  name: controller
+  namespace: metallb-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: metallb-system:controller
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: metallb-system:speaker
+  labels:
+    app: metallb
+subjects:
+- kind: ServiceAccount
+  name: speaker
+  namespace: metallb-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: metallb-system:speaker
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  namespace: metallb-system
+  name: config-watcher
+  labels:
+    app: metallb
+subjects:
+- kind: ServiceAccount
+  name: controller
+- kind: ServiceAccount
+  name: speaker
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: config-watcher
+---
+apiVersion: apps/v1beta2
+kind: DaemonSet
+metadata:
+  namespace: metallb-system
+  name: speaker
+  labels:
+    app: metallb
+    component: speaker
+spec:
+  selector:
+    matchLabels:
+      app: metallb
+      component: speaker
+  template:
+    metadata:
+      labels:
+        app: metallb
+        component: speaker
+      annotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "{{ metallb.port }}"
+    spec:
+      serviceAccountName: speaker
+      terminationGracePeriodSeconds: 0
+      hostNetwork: true
+      containers:
+      - name: speaker
+        image: metallb/speaker:{{ metallb.version }}
+        imagePullPolicy: IfNotPresent
+        args:
+        - --port={{ metallb.port }}
+        - --config=config
+        env:
+        - name: METALLB_NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        ports:
+        - name: monitoring
+          containerPort: {{ metallb.port }}
+        resources:
+          limits:
+            cpu: {{ metallb.limits.cpu }}
+            memory: {{ metallb.limits.memory }}
+        securityContext:
+          allowPrivilegeEscalation: false
+          readOnlyRootFilesystem: true
+          capabilities:
+            drop:
+            - all
+            add:
+            - net_raw
+
+---
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+  namespace: metallb-system
+  name: controller
+  labels:
+    app: metallb
+    component: controller
+spec:
+  revisionHistoryLimit: 3
+  selector:
+    matchLabels:
+      app: metallb
+      component: controller
+  template:
+    metadata:
+      labels:
+        app: metallb
+        component: controller
+      annotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "{{ metallb.port }}"
+    spec:
+      serviceAccountName: controller
+      terminationGracePeriodSeconds: 0
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: 65534 # nobody
+      containers:
+      - name: controller
+        image: metallb/controller:{{ metallb.version }}
+        imagePullPolicy: IfNotPresent
+        args:
+        - --port={{ metallb.port }}
+        - --config=config
+        ports:
+        - name: monitoring
+          containerPort: {{ metallb.port }}
+        resources:
+          limits:
+            cpu: {{ metallb.limits.cpu }}
+            memory: {{ metallb.limits.memory }}
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - all
+          readOnlyRootFilesystem: true
+
+---
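Once the controller and speaker are running, any `type: LoadBalancer` Service draws an address from the auto-assigning pool, while a pool created with `auto_assign: false` must be requested by name. A hedged example, assuming MetalLB v0.7's pool-selection annotation:

```yaml
# Hypothetical Service requesting an address from the non-auto-assigned pool.
apiVersion: v1
kind: Service
metadata:
  name: web
  annotations:
    metallb.universe.tf/address-pool: kube_service_pool
spec:
  type: LoadBalancer
  selector:
    app: web
  ports:
    - port: 80
      targetPort: 8080
```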
@@ -1,5 +1,5 @@
 ---
-apiVersion: rbac.authorization.k8s.io/v1
+apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
   name: kubernetes-dashboard
@@ -8,19 +8,19 @@ In the same directory of this ReadMe file you should find a file named `inventor
 
 Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings on `inventory/sample/group_vars/all.yml` make sense with your deployment. Then change to the kubespray root folder and execute (supposing that the machines are all using ubuntu):
 
-```shell
+```
 ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
 ```
 
 This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:
 
-```shell
+```
 ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
 ```
 
 If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:
 
-```shell
+```
 k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core
 k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core
 k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core
@@ -30,7 +30,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us
 
 First step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:
 
-```ini
+```
 cluster_name = "cluster1"
 number_of_k8s_masters = "1"
 number_of_k8s_masters_no_floating_ip = "2"
@ -39,7 +39,7 @@ number_of_k8s_nodes = "0"
|
|||
public_key_path = "~/.ssh/my-desired-key.pub"
|
||||
image = "Ubuntu 16.04"
|
||||
ssh_user = "ubuntu"
|
||||
flavor_k8s_node = "node-flavor-id-in-your-openstack"
|
||||
flavor_k8s_node = "node-flavor-id-in-your-openstack"
|
||||
flavor_k8s_master = "master-flavor-id-in-your-openstack"
|
||||
network_name = "k8s-network"
|
||||
floatingip_pool = "net_external"
|
||||
|
@ -54,7 +54,7 @@ ssh_user_gfs = "ubuntu"
|
|||
|
||||
As explained in the general terraform/openstack guide, you need to source your OpenStack credentials file, add your ssh-key to the ssh-agent and setup environment variables for terraform:
|
||||
|
||||
```shell
|
||||
```
|
||||
$ source ~/.stackrc
|
||||
$ eval $(ssh-agent -s)
|
||||
$ ssh-add ~/.ssh/my-desired-key
|
||||
|
@@ -67,7 +67,7 @@ $ echo Setting up Terraform creds && \
 
 Then, standing on the kubespray directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:
 
-```shell
+```
 terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
 ```
 
@@ -75,18 +75,18 @@ This will create both your Kubernetes and Gluster VMs. Make sure that the ansibl
 
 Then, provision your Kubernetes (kubespray) cluster with the following ansible call:
 
-```shell
+```
 ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
 ```
 
 Finally, provision the glusterfs nodes and add the Persistent Volume setup for GlusterFS in Kubernetes through the following ansible call:
 
-```shell
+```
 ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
 ```
 
 If you need to destroy the cluster, you can run:
 
-```shell
+```
 terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
 ```
 
@@ -15,10 +15,10 @@
   roles:
   - { role: glusterfs/server }
 
-- hosts: k8s_cluster
+- hosts: k8s-cluster
   roles:
   - { role: glusterfs/client }
 
-- hosts: kube_control_plane[0]
+- hosts: kube-master[0]
   roles:
   - { role: kubernetes-pv }
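This rename recurs throughout the remaining hunks: master uses underscore group names (`k8s_cluster`, `kube_control_plane`, `kube_node`), while release-2.x keeps the older hyphenated ones. For comparison, a minimal YAML inventory in master's naming, with hypothetical hosts:

```yaml
all:
  children:
    kube_control_plane:
      hosts:
        node1:
    kube_node:
      hosts:
        node2:
        node3:
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
```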
@@ -11,10 +11,10 @@
 # ## Set disk_volume_device_1 to desired device for gluster brick, if different to /dev/vdb (default).
 # ## As in the previous case, you can set ip to give direct communication on internal IPs
 # gfs_node1 ansible_ssh_host=95.54.0.18 # disk_volume_device_1=/dev/vdc ip=10.3.0.7
-# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
-# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
+# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
+# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
 
-# [kube_control_plane]
+# [kube-master]
 # node1
 # node2
@@ -23,16 +23,16 @@
 # node2
 # node3
 
-# [kube_node]
+# [kube-node]
 # node2
 # node3
 # node4
 # node5
 # node6
 
-# [k8s_cluster:children]
-# kube_node
-# kube_control_plane
+# [k8s-cluster:children]
+# kube-node
+# kube-master
 
 # [gfs-cluster]
 # gfs_node1
@@ -8,24 +8,20 @@ Installs and configures GlusterFS on Linux.
 
 For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (that port, plus an additional incremented port for each additional server in the cluster; the latter if GlusterFS is version 3.4+), and TCP/UDP port `111` must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role).
 
-This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/ansible/latest/collections/gluster/gluster/gluster_volume_module.html) module to ease the management of Gluster volumes.
+This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/gluster_volume_module.html) module to ease the management of Gluster volumes.
 
 ## Role Variables
 
 Available variables are listed below, along with default values (see `defaults/main.yml`):
 
-```yaml
-glusterfs_default_release: ""
-```
+    glusterfs_default_release: ""
 
 You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
 
-```yaml
-glusterfs_ppa_use: yes
-glusterfs_ppa_version: "3.5"
-```
+    glusterfs_ppa_use: yes
+    glusterfs_ppa_version: "3.5"
 
-For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.
+For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](http://www.gluster.org/community/documentation/index.php/Getting_started_install) for more info.
 
 ## Dependencies
 
@@ -33,11 +29,9 @@ None.
 
 ## Example Playbook
 
-```yaml
 - hosts: server
   roles:
     - geerlingguy.glusterfs
-```
 
 For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).
 
@@ -7,7 +7,7 @@
   register: glusterfs_ppa_added
   when: glusterfs_ppa_use
 
-- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
+- name: Ensure GlusterFS client will reinstall if the PPA was just added.
   apt:
     name: "{{ item }}"
     state: absent
@@ -18,7 +18,7 @@
 - name: Ensure GlusterFS client is installed.
   apt:
     name: "{{ item }}"
-    state: present
+    state: installed
     default_release: "{{ glusterfs_default_release }}"
   with_items:
     - glusterfs-client
@@ -1,10 +1,10 @@
 ---
 - name: Install Prerequisites
-  package: name={{ item }} state=present
+  yum: name={{ item }} state=present
   with_items:
     - "centos-release-gluster{{ glusterfs_default_release }}"
 
 - name: Install Packages
-  package: name={{ item }} state=present
+  yum: name={{ item }} state=present
   with_items:
     - glusterfs-client
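The `package:` → `yum:` pairs here (and in several later hunks) reflect master's move to the distribution-agnostic `package` module, which dispatches to apt, yum, or dnf depending on the host. A minimal sketch of one such task:

```yaml
# Resolves to apt on Debian-family hosts and yum/dnf on RedHat-family,
# so one task can replace per-OS variants.
- name: Install GlusterFS client
  package:
    name: glusterfs-client
    state: present
```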
@@ -3,13 +3,13 @@
 - name: Include OS-specific variables.
   include_vars: "{{ ansible_os_family }}.yml"
 
-# Install xfs package
+# Instal xfs package
 - name: install xfs Debian
   apt: name=xfsprogs state=present
   when: ansible_os_family == "Debian"
 
 - name: install xfs RedHat
-  package: name=xfsprogs state=present
+  yum: name=xfsprogs state=present
   when: ansible_os_family == "RedHat"
 
 # Format external volumes in xfs
@@ -36,7 +36,7 @@
     - "{{ gluster_brick_dir }}"
     - "{{ gluster_mount_dir }}"
 
-- name: Configure Gluster volume with replicas
+- name: Configure Gluster volume.
   gluster_volume:
     state: present
     name: "{{ gluster_brick_name }}"
@@ -46,18 +46,6 @@
     host: "{{ inventory_hostname }}"
     force: yes
   run_once: true
-  when: groups['gfs-cluster']|length > 1
-
-- name: Configure Gluster volume without replicas
-  gluster_volume:
-    state: present
-    name: "{{ gluster_brick_name }}"
-    brick: "{{ gluster_brick_dir }}"
-    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
-    host: "{{ inventory_hostname }}"
-    force: yes
-  run_once: true
-  when: groups['gfs-cluster']|length <= 1
 
 - name: Mount glusterfs to retrieve disk size
   mount:
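Master splits volume creation into "with replicas" and "without replicas" tasks gated on the size of `gfs-cluster`, since a replica count is only meaningful with more than one brick host. The inline Jinja in `cluster:` simply builds a comma-separated peer list; a standalone sketch with hypothetical hostvars:

```yaml
# With gfs_node1 (ip=10.3.0.7) and gfs_node2 (ip=10.3.0.8) in gfs-cluster,
# the expression renders to "10.3.0.7,10.3.0.8".
- name: Show the rendered peer list (illustration only)
  debug:
    msg: >-
      {% for item in groups['gfs-cluster'] -%}
      {{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}
      {%- if not loop.last %},{% endif %}
      {%- endfor %}
```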
@@ -82,7 +70,6 @@
   template:
     dest: "{{ gluster_mount_dir }}/.test-file.txt"
     src: test-file.txt
-    mode: 0644
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
 
 - name: Unmount glusterfs
@@ -7,7 +7,7 @@
   register: glusterfs_ppa_added
   when: glusterfs_ppa_use
 
-- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
+- name: Ensure GlusterFS will reinstall if the PPA was just added.
   apt:
     name: "{{ item }}"
     state: absent
@@ -19,7 +19,7 @@
 - name: Ensure GlusterFS is installed.
   apt:
     name: "{{ item }}"
-    state: present
+    state: installed
     default_release: "{{ glusterfs_default_release }}"
   with_items:
     - glusterfs-server
@@ -1,11 +1,11 @@
 ---
 - name: Install Prerequisites
-  package: name={{ item }} state=present
+  yum: name={{ item }} state=present
   with_items:
     - "centos-release-gluster{{ glusterfs_default_release }}"
 
 - name: Install Packages
-  package: name={{ item }} state=present
+  yum: name={{ item }} state=present
   with_items:
     - glusterfs-server
     - glusterfs-client
@@ -1,4 +1,5 @@
 ---
 - hosts: all
+
   roles:
     - { role: prepare }
     - role_under_test
@@ -3,13 +3,12 @@
   template:
     src: "{{ item.file }}"
     dest: "{{ kube_config_dir }}/{{ item.dest }}"
-    mode: 0644
   with_items:
     - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
     - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
     - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
   register: gluster_pv
-  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
+  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
 
 - name: Kubernetes Apps | Set GlusterFS endpoint and PV
   kube:
@@ -20,4 +19,4 @@
     filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
     state: "{{ item.changed | ternary('latest','present') }}"
   with_items: "{{ gluster_pv.results }}"
-  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
+  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
@@ -8,7 +8,7 @@
 {% for host in groups['gfs-cluster'] %}
   {
     "addresses": [
-      {
+      {
        "ip": "{{hostvars[host]['ip']|default(hostvars[host].ansible_default_ipv4['address'])}}"
       }
     ],
@@ -1,7 +1,7 @@
 apiVersion: v1
 kind: PersistentVolume
 metadata:
-  name: glusterfs
+  name: glusterfs
 spec:
   capacity:
     storage: "{{ hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb }}Gi"
@@ -1,26 +1,17 @@
 # Deploy Heketi/Glusterfs into Kubespray/Kubernetes
 
 This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys heketi/glusterfs into kubernetes and sets up a storageclass.
 
-## Important notice
-
-> Due to resource limits on the current project maintainers and general lack of contributions we are considering placing Heketi into a [near-maintenance mode](https://github.com/heketi/heketi#important-notice)
-
 ## Client Setup
 
 Heketi provides a CLI that gives users a means to administer the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine.
 
 ## Install
 
 Copy the inventory.yml.sample over to inventory/sample/k8s_heketi_inventory.yml and change it according to your setup.
 
-```shell
+```
 ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
 ```
 
 ## Tear down
 
-```shell
+```
 ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
 ```
 
@@ -1,5 +1,5 @@
 ---
-- hosts: kube_control_plane[0]
+- hosts: kube-master[0]
   roles:
     - { role: tear-down }
 
@@ -3,7 +3,7 @@
   roles:
     - { role: prepare }
 
-- hosts: kube_control_plane[0]
+- hosts: kube-master[0]
   tags:
     - "provision"
   roles:
@@ -2,25 +2,18 @@ all:
   vars:
     heketi_admin_key: "11elfeinhundertundelf"
     heketi_user_key: "!!einseinseins"
-    glusterfs_daemonset:
-      readiness_probe:
-        timeout_seconds: 3
-        initial_delay_seconds: 3
-      liveness_probe:
-        timeout_seconds: 3
-        initial_delay_seconds: 10
   children:
-    k8s_cluster:
+    k8s-cluster:
       vars:
         kubelet_fail_swap_on: false
      children:
-        kube_control_plane:
+        kube-master:
           hosts:
             node1:
         etcd:
           hosts:
             node2:
-        kube_node:
+        kube-node:
           hosts: &kube_nodes
             node1:
             node2:
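`&kube_nodes` is a plain YAML anchor: a later group in the same file can reuse the host set with `*kube_nodes` instead of repeating it. The mechanics in isolation (the second group is assumed from `groups['heketi-node']` used later in this diff):

```yaml
children:
  kube-node:
    hosts: &kube_nodes   # define the anchor on this mapping
      node1:
      node2:
  heketi-node:
    hosts: *kube_nodes   # alias: reuse exactly the same host set
```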
@@ -11,7 +11,7 @@
 
 - name: "Install glusterfs mount utils (RedHat)"
   become: true
-  package:
+  yum:
     name: "glusterfs-fuse"
     state: "present"
   when: "ansible_os_family == 'RedHat'"
@@ -1,10 +1,7 @@
 ---
 - name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
   become: true
-  template:
-    src: "heketi-bootstrap.json.j2"
-    dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
-    mode: 0640
+  template: { src: "heketi-bootstrap.json.j2", dest: "{{ kube_config_dir }}/heketi-bootstrap.json" }
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
   kube:
@@ -6,7 +6,7 @@
 - name: "Delete bootstrap Heketi."
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
   when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
-- name: "Ensure there is nothing left over." # noqa 301
+- name: "Ensure there is nothing left over."
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
@@ -10,11 +10,10 @@
   template:
     src: "topology.json.j2"
     dest: "{{ kube_config_dir }}/topology.json"
-    mode: 0644
 - name: "Copy topology configuration into container."
   changed_when: false
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology." # noqa 503
+- name: "Load heketi topology."
   when: "render.changed"
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
   register: "load_heketi"
@@ -18,7 +18,7 @@
 - name: "Provision database volume."
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
   when: "heketi_database_volume_exists is undefined"
-- name: "Copy configuration from pod." # noqa 301
+- name: "Copy configuration from pod."
   become: true
   command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
 - name: "Get heketi volume ids."
@@ -1,9 +1,6 @@
 ---
 - name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
-  template:
-    src: "glusterfs-daemonset.json.j2"
-    dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
-    mode: 0644
+  template: { src: "glusterfs-daemonset.json.j2", dest: "{{ kube_config_dir }}/glusterfs-daemonset.json" }
   become: true
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
@@ -30,10 +27,7 @@
   delay: 5
 
 - name: "Kubernetes Apps | Lay Down Heketi Service Account"
-  template:
-    src: "heketi-service-account.json.j2"
-    dest: "{{ kube_config_dir }}/heketi-service-account.json"
-    mode: 0644
+  template: { src: "heketi-service-account.json.j2", dest: "{{ kube_config_dir }}/heketi-service-account.json" }
   become: true
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Heketi Service Account"
@@ -4,7 +4,6 @@
   template:
     src: "heketi-deployment.json.j2"
     dest: "{{ kube_config_dir }}/heketi-deployment.json"
-    mode: 0644
   register: "rendering"
 
 - name: "Kubernetes Apps | Install and configure Heketi"
@@ -5,7 +5,7 @@
   changed_when: false
 
 - name: "Kubernetes Apps | Deploy cluster role binding."
-  when: "clusterrolebinding_state.stdout | length == 0"
+  when: "clusterrolebinding_state.stdout == \"\""
   command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
 
 - name: Get clusterrolebindings again
@@ -15,7 +15,7 @@
 
 - name: Make sure that clusterrolebindings are present now
   assert:
-    that: "clusterrolebinding_state.stdout | length > 0"
+    that: "clusterrolebinding_state.stdout != \"\""
     msg: "Cluster role binding is not present."
 
 - name: Get the heketi-config-secret secret
@@ -28,10 +28,9 @@
   template:
     src: "heketi.json.j2"
     dest: "{{ kube_config_dir }}/heketi.json"
-    mode: 0644
 
 - name: "Deploy Heketi config secret"
-  when: "secret_state.stdout | length == 0"
+  when: "secret_state.stdout == \"\""
   command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
 
 - name: Get the heketi-config-secret secret again
@@ -41,5 +40,5 @@
 
 - name: Make sure the heketi-config-secret secret exists now
   assert:
-    that: "secret_state.stdout | length > 0"
+    that: "secret_state.stdout != \"\""
     msg: "Heketi config secret is not present."
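Master's `| length == 0` spelling comes from an ansible-lint rule against comparing to empty literals; for a registered `stdout` the two tests are equivalent, as a tiny self-check shows:

```yaml
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Register a command with empty output
      command: "true"
      register: probe
      changed_when: false

    - name: Both emptiness tests agree
      assert:
        that:
          - (probe.stdout | length == 0) == (probe.stdout == "")
```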
@@ -2,10 +2,7 @@
 - name: "Kubernetes Apps | Lay Down Heketi Storage"
   become: true
   vars: { nodes: "{{ groups['heketi-node'] }}" }
-  template:
-    src: "heketi-storage.json.j2"
-    dest: "{{ kube_config_dir }}/heketi-storage.json"
-    mode: 0644
+  template: { src: "heketi-storage.json.j2", dest: "{{ kube_config_dir }}/heketi-storage.json" }
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Heketi Storage"
   kube:
@@ -16,7 +16,6 @@
   template:
     src: "storageclass.yml.j2"
     dest: "{{ kube_config_dir }}/storageclass.yml"
-    mode: 0644
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Storace Class"
   kube:
@@ -10,11 +10,10 @@
   template:
     src: "topology.json.j2"
     dest: "{{ kube_config_dir }}/topology.json"
-    mode: 0644
-- name: "Copy topology configuration into container." # noqa 503
+- name: "Copy topology configuration into container."
   when: "rendering.changed"
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology." # noqa 503
+- name: "Load heketi topology."
   when: "rendering.changed"
   command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
 - name: "Get heketi topology."
@@ -1,6 +1,6 @@
 {
   "kind": "DaemonSet",
-  "apiVersion": "apps/v1",
+  "apiVersion": "extensions/v1beta1",
   "metadata": {
     "name": "glusterfs",
     "labels": {
@@ -12,11 +12,6 @@
     }
   },
   "spec": {
-    "selector": {
-      "matchLabels": {
-        "glusterfs-node": "daemonset"
-      }
-    },
     "template": {
       "metadata": {
         "name": "glusterfs",
@@ -73,8 +68,8 @@
           "privileged": true
         },
         "readinessProbe": {
-          "timeoutSeconds": {{ glusterfs_daemonset.readiness_probe.timeout_seconds }},
-          "initialDelaySeconds": {{ glusterfs_daemonset.readiness_probe.initial_delay_seconds }},
+          "timeoutSeconds": 3,
+          "initialDelaySeconds": 3,
           "exec": {
             "command": [
               "/bin/bash",
@@ -84,8 +79,8 @@
           }
         },
         "livenessProbe": {
-          "timeoutSeconds": {{ glusterfs_daemonset.liveness_probe.timeout_seconds }},
-          "initialDelaySeconds": {{ glusterfs_daemonset.liveness_probe.initial_delay_seconds }},
+          "timeoutSeconds": 3,
+          "initialDelaySeconds": 10,
           "exec": {
             "command": [
               "/bin/bash",
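Those hardcoded probe timings are what master lifted into the `glusterfs_daemonset` variable, the same structure removed from the sample inventory earlier in this diff, so slower disks can be tuned from the inventory instead of editing the manifest. A sketch of such an override (the bumped values are hypothetical):

```yaml
all:
  vars:
    glusterfs_daemonset:
      readiness_probe:
        timeout_seconds: 3
        initial_delay_seconds: 3
      liveness_probe:
        timeout_seconds: 5        # hypothetical bump for slow bricks
        initial_delay_seconds: 30
```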
@@ -30,7 +30,7 @@
   },
   {
     "kind": "Deployment",
-    "apiVersion": "apps/v1",
+    "apiVersion": "extensions/v1beta1",
     "metadata": {
       "name": "deploy-heketi",
       "labels": {
@@ -42,11 +42,6 @@
     }
   },
   "spec": {
-    "selector": {
-      "matchLabels": {
-        "name": "deploy-heketi"
-      }
-    },
     "replicas": 1,
     "template": {
       "metadata": {
@@ -44,7 +44,7 @@
   },
   {
     "kind": "Deployment",
-    "apiVersion": "apps/v1",
+    "apiVersion": "extensions/v1beta1",
     "metadata": {
       "name": "heketi",
       "labels": {
@@ -55,11 +55,6 @@
     }
   },
   "spec": {
-    "selector": {
-      "matchLabels": {
-        "name": "heketi"
-      }
-    },
     "replicas": 1,
     "template": {
       "metadata": {
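The `apps/v1` vs `extensions/v1beta1` flips explain the `spec.selector` blocks that only master carries: `apps/v1` made the selector required and immutable, whereas the beta API defaulted it from the pod template labels. A minimal skeleton of the required pairing (names and image are hypothetical):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: heketi-example
spec:
  replicas: 1
  selector:
    matchLabels:
      name: heketi-example   # must match the template labels below
  template:
    metadata:
      labels:
        name: heketi-example
    spec:
      containers:
        - name: heketi
          image: registry.example.com/heketi:1.0   # placeholder image
```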
@@ -16,7 +16,7 @@
   {
     "addresses": [
       {
-        "ip": "{{ hostvars[node].ip }}"
+        "ip": "{{ hostvars[node]['ansible_facts']['default_ipv4']['address'] }}"
       }
     ],
     "ports": [
@@ -12,7 +12,7 @@
         "{{ node }}"
       ],
       "storage": [
-        "{{ hostvars[node].ip }}"
+        "{{ hostvars[node]['ansible_facts']['default_ipv4']['address'] }}"
       ]
     },
     "zone": 1
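Master prefers the inventory-declared `ip` variable (Kubespray's convention for pinning a node's internal address) over the gathered default-IPv4 fact, which can point at the wrong interface on multi-homed nodes. A small sketch contrasting the two sources:

```yaml
- hosts: gfs-cluster
  tasks:
    - name: Show declared vs discovered address for each node
      debug:
        msg: >-
          declared={{ ip | default('unset') }}
          discovered={{ ansible_facts['default_ipv4']['address'] }}
```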
@@ -1,7 +1,7 @@
 ---
 - name: "Install lvm utils (RedHat)"
   become: true
-  package:
+  yum:
     name: "lvm2"
     state: "present"
   when: "ansible_os_family == 'RedHat'"
@@ -19,10 +19,10 @@
   become: true
   shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
   register: "volume_groups"
-  ignore_errors: true # noqa ignore-errors
+  ignore_errors: true
   changed_when: false
 
-- name: "Remove volume groups." # noqa 301
+- name: "Remove volume groups."
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
   become: true
@@ -30,16 +30,16 @@
   with_items: "{{ volume_groups.stdout_lines }}"
   loop_control: { loop_var: "volume_group" }
 
-- name: "Remove physical volume from cluster disks." # noqa 301
+- name: "Remove physical volume from cluster disks."
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
   become: true
   command: "pvremove {{ disk_volume_device_1 }} --yes"
-  ignore_errors: true # noqa ignore-errors
+  ignore_errors: true
 
 - name: "Remove lvm utils (RedHat)"
   become: true
-  package:
+  yum:
     name: "lvm2"
     state: "absent"
   when: "ansible_os_family == 'RedHat' and heketi_remove_lvm"
@@ -1,51 +1,51 @@
 ---
-- name: Remove storage class. # noqa 301
+- name: "Remove storage class."
   command: "{{ bin_dir }}/kubectl delete storageclass gluster"
-  ignore_errors: true # noqa ignore-errors
-- name: Tear down heketi. # noqa 301
+  ignore_errors: true
+- name: "Tear down heketi."
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
-  ignore_errors: true # noqa ignore-errors
-- name: Tear down heketi. # noqa 301
+  ignore_errors: true
+- name: "Tear down heketi."
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
-  ignore_errors: true # noqa ignore-errors
-- name: Tear down bootstrap.
-  include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
-- name: Ensure there is nothing left over. # noqa 301
+  ignore_errors: true
+- name: "Tear down bootstrap."
+  include_tasks: "../provision/tasks/bootstrap/tear-down.yml"
+- name: "Ensure there is nothing left over."
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: Ensure there is nothing left over. # noqa 301
+- name: "Ensure there is nothing left over."
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: Tear down glusterfs. # noqa 301
+- name: "Tear down glusterfs."
   command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
-  ignore_errors: true # noqa ignore-errors
-- name: Remove heketi storage service. # noqa 301
+  ignore_errors: true
+- name: "Remove heketi storage service."
   command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
-  ignore_errors: true # noqa ignore-errors
-- name: Remove heketi gluster role binding # noqa 301
+  ignore_errors: true
+- name: "Remove heketi gluster role binding"
   command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
-  ignore_errors: true # noqa ignore-errors
-- name: Remove heketi config secret # noqa 301
+  ignore_errors: true
+- name: "Remove heketi config secret"
   command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
-  ignore_errors: true # noqa ignore-errors
-- name: Remove heketi db backup # noqa 301
+  ignore_errors: true
+- name: "Remove heketi db backup"
   command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
-  ignore_errors: true # noqa ignore-errors
-- name: Remove heketi service account # noqa 301
+  ignore_errors: true
+- name: "Remove heketi service account"
   command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
-  ignore_errors: true # noqa ignore-errors
-- name: Get secrets
+  ignore_errors: true
+- name: "Get secrets"
   command: "{{ bin_dir }}/kubectl get secrets --output=\"json\""
   register: "secrets"
   changed_when: false
-- name: Remove heketi storage secret
+- name: "Remove heketi storage secret"
   vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
   command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
   when: "storage_query is defined"
-  ignore_errors: true # noqa ignore-errors
+  ignore_errors: true
Some files were not shown because too many files have changed in this diff.