Fixed conflicts, ipip:true as default and added ipip_mode

This commit is contained in:
AtzeDeVries 2017-07-08 14:36:44 +02:00
commit e160018826
74 changed files with 466 additions and 357 deletions


@ -24,7 +24,7 @@ explain why.
- **Version of Ansible** (`ansible --version`): - **Version of Ansible** (`ansible --version`):
**Kargo version (commit) (`git rev-parse --short HEAD`):** **Kubespray version (commit) (`git rev-parse --short HEAD`):**
**Network plugin used**: **Network plugin used**:


@ -90,8 +90,9 @@ before_script:
- pwd - pwd
- ls - ls
- echo ${PWD} - echo ${PWD}
- echo "${STARTUP_SCRIPT}"
- > - >
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
${LOG_LEVEL} ${LOG_LEVEL}
-e cloud_image=${CLOUD_IMAGE} -e cloud_image=${CLOUD_IMAGE}
-e cloud_region=${CLOUD_REGION} -e cloud_region=${CLOUD_REGION}
@ -103,6 +104,7 @@ before_script:
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN} -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e mode=${CLUSTER_MODE} -e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID} -e test_id=${TEST_ID}
-e startup_script="'${STARTUP_SCRIPT}'"
# Check out latest tag if testing upgrade # Check out latest tag if testing upgrade
# Uncomment when gitlab kargo repo has tags # Uncomment when gitlab kargo repo has tags
@ -116,7 +118,7 @@ before_script:
${SSH_ARGS} ${SSH_ARGS}
${LOG_LEVEL} ${LOG_LEVEL}
-e ansible_python_interpreter=${PYPATH} -e ansible_python_interpreter=${PYPATH}
-e ansible_ssh_user=${SSH_USER} -e ansible_ssh_user=${SSH_USER}
-e bootstrap_os=${BOOTSTRAP_OS} -e bootstrap_os=${BOOTSTRAP_OS}
-e cert_management=${CERT_MGMT:-script} -e cert_management=${CERT_MGMT:-script}
-e cloud_provider=gce -e cloud_provider=gce
@ -125,6 +127,7 @@ before_script:
-e download_run_once=${DOWNLOAD_RUN_ONCE} -e download_run_once=${DOWNLOAD_RUN_ONCE}
-e etcd_deployment_type=${ETCD_DEPLOYMENT} -e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN} -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT} -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads -e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE} -e resolvconf_mode=${RESOLVCONF_MODE}
@ -134,30 +137,31 @@ before_script:
# Repeat deployment if testing upgrade # Repeat deployment if testing upgrade
- > - >
if [ "${UPGRADE_TEST}" != "false" ]; then if [ "${UPGRADE_TEST}" != "false" ]; then
test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml"; test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml"; test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
pip install ansible==2.3.0; pip install ansible==2.3.0;
git checkout "${CI_BUILD_REF}"; git checkout "${CI_BUILD_REF}";
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
${SSH_ARGS} ${SSH_ARGS}
${LOG_LEVEL} ${LOG_LEVEL}
-e ansible_python_interpreter=${PYPATH} -e ansible_python_interpreter=${PYPATH}
-e ansible_ssh_user=${SSH_USER} -e ansible_ssh_user=${SSH_USER}
-e bootstrap_os=${BOOTSTRAP_OS} -e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce -e cloud_provider=gce
-e deploy_netchecker=true -e deploy_netchecker=true
-e download_localhost=${DOWNLOAD_LOCALHOST} -e download_localhost=${DOWNLOAD_LOCALHOST}
-e download_run_once=${DOWNLOAD_RUN_ONCE} -e download_run_once=${DOWNLOAD_RUN_ONCE}
-e etcd_deployment_type=${ETCD_DEPLOYMENT} -e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN} -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT} -e kubedns_min_replicas=1
-e local_release_dir=${PWD}/downloads -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e resolvconf_mode=${RESOLVCONF_MODE} -e local_release_dir=${PWD}/downloads
-e weave_cpu_requests=${WEAVE_CPU_LIMIT} -e resolvconf_mode=${RESOLVCONF_MODE}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT} -e weave_cpu_requests=${WEAVE_CPU_LIMIT}
--limit "all:!fake_hosts" -e weave_cpu_limit=${WEAVE_CPU_LIMIT}
$PLAYBOOK; --limit "all:!fake_hosts"
$PLAYBOOK;
fi fi
# Tests Cases # Tests Cases
@ -173,40 +177,41 @@ before_script:
## Idempotency checks 1/5 (repeat deployment) ## Idempotency checks 1/5 (repeat deployment)
- > - >
if [ "${IDEMPOT_CHECK}" = "true" ]; then if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa --private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS} -e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH} -e ansible_python_interpreter=${PYPATH}
-e download_localhost=${DOWNLOAD_LOCALHOST} -e download_localhost=${DOWNLOAD_LOCALHOST}
-e download_run_once=${DOWNLOAD_RUN_ONCE} -e download_run_once=${DOWNLOAD_RUN_ONCE}
-e deploy_netchecker=true -e deploy_netchecker=true
-e resolvconf_mode=${RESOLVCONF_MODE} -e resolvconf_mode=${RESOLVCONF_MODE}
-e local_release_dir=${PWD}/downloads -e local_release_dir=${PWD}/downloads
-e etcd_deployment_type=${ETCD_DEPLOYMENT} -e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT} -e kubedns_min_replicas=1
--limit "all:!fake_hosts" -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
--limit "all:!fake_hosts"
cluster.yml; cluster.yml;
fi fi
## Idempotency checks 2/5 (Advanced DNS checks) ## Idempotency checks 2/5 (Advanced DNS checks)
- > - >
if [ "${IDEMPOT_CHECK}" = "true" ]; then if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
--limit "all:!fake_hosts" --limit "all:!fake_hosts"
tests/testcases/040_check-network-adv.yml $LOG_LEVEL; tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
fi fi
## Idempotency checks 3/5 (reset deployment) ## Idempotency checks 3/5 (reset deployment)
- > - >
if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa --private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS} -e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH} -e ansible_python_interpreter=${PYPATH}
-e reset_confirmation=yes -e reset_confirmation=yes
--limit "all:!fake_hosts" --limit "all:!fake_hosts"
reset.yml; reset.yml;
fi fi
@ -214,28 +219,29 @@ before_script:
## Idempotency checks 4/5 (redeploy after reset) ## Idempotency checks 4/5 (redeploy after reset)
- > - >
if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa --private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS} -e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH} -e ansible_python_interpreter=${PYPATH}
-e download_localhost=${DOWNLOAD_LOCALHOST} -e download_localhost=${DOWNLOAD_LOCALHOST}
-e download_run_once=${DOWNLOAD_RUN_ONCE} -e download_run_once=${DOWNLOAD_RUN_ONCE}
-e deploy_netchecker=true -e deploy_netchecker=true
-e resolvconf_mode=${RESOLVCONF_MODE} -e resolvconf_mode=${RESOLVCONF_MODE}
-e local_release_dir=${PWD}/downloads -e local_release_dir=${PWD}/downloads
-e etcd_deployment_type=${ETCD_DEPLOYMENT} -e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT} -e kubedns_min_replicas=1
--limit "all:!fake_hosts" -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
--limit "all:!fake_hosts"
cluster.yml; cluster.yml;
fi fi
## Idempotency checks 5/5 (Advanced DNS checks) ## Idempotency checks 5/5 (Advanced DNS checks)
- > - >
if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
--limit "all:!fake_hosts" --limit "all:!fake_hosts"
tests/testcases/040_check-network-adv.yml $LOG_LEVEL; tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
fi fi
@ -261,6 +267,8 @@ before_script:
CLUSTER_MODE: separate CLUSTER_MODE: separate
BOOTSTRAP_OS: coreos BOOTSTRAP_OS: coreos
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12 RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
##User-data to simply turn off coreos upgrades
STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables .ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
# stage: deploy-gce-part1 # stage: deploy-gce-part1
@ -271,6 +279,7 @@ before_script:
UPGRADE_TEST: "basic" UPGRADE_TEST: "basic"
CLUSTER_MODE: ha CLUSTER_MODE: ha
UPGRADE_TEST: "graceful" UPGRADE_TEST: "graceful"
STARTUP_SCRIPT: ""
.rhel7_weave_variables: &rhel7_weave_variables .rhel7_weave_variables: &rhel7_weave_variables
# stage: deploy-gce-part1 # stage: deploy-gce-part1
@ -278,6 +287,7 @@ before_script:
CLOUD_IMAGE: rhel-7 CLOUD_IMAGE: rhel-7
CLOUD_REGION: europe-west1-b CLOUD_REGION: europe-west1-b
CLUSTER_MODE: default CLUSTER_MODE: default
STARTUP_SCRIPT: ""
.centos7_flannel_variables: &centos7_flannel_variables .centos7_flannel_variables: &centos7_flannel_variables
# stage: deploy-gce-part2 # stage: deploy-gce-part2
@ -285,13 +295,15 @@ before_script:
CLOUD_IMAGE: centos-7 CLOUD_IMAGE: centos-7
CLOUD_REGION: us-west1-a CLOUD_REGION: us-west1-a
CLUSTER_MODE: default CLUSTER_MODE: default
STARTUP_SCRIPT: ""
.debian8_calico_variables: &debian8_calico_variables .debian8_calico_variables: &debian8_calico_variables
# stage: deploy-gce-part2 # stage: deploy-gce-part2
KUBE_NETWORK_PLUGIN: calico KUBE_NETWORK_PLUGIN: calico
CLOUD_IMAGE: debian-8-kubespray CLOUD_IMAGE: debian-8-kubespray
CLOUD_REGION: us-central1-b CLOUD_REGION: us-central1-b
CLUSTER_MODE: default CLUSTER_MODE: default
STARTUP_SCRIPT: ""
.coreos_canal_variables: &coreos_canal_variables .coreos_canal_variables: &coreos_canal_variables
# stage: deploy-gce-part2 # stage: deploy-gce-part2
@ -302,6 +314,7 @@ before_script:
BOOTSTRAP_OS: coreos BOOTSTRAP_OS: coreos
IDEMPOT_CHECK: "true" IDEMPOT_CHECK: "true"
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12 RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
.rhel7_canal_sep_variables: &rhel7_canal_sep_variables .rhel7_canal_sep_variables: &rhel7_canal_sep_variables
# stage: deploy-gce-special # stage: deploy-gce-special
@ -309,6 +322,7 @@ before_script:
CLOUD_IMAGE: rhel-7 CLOUD_IMAGE: rhel-7
CLOUD_REGION: us-east1-b CLOUD_REGION: us-east1-b
CLUSTER_MODE: separate CLUSTER_MODE: separate
STARTUP_SCRIPT: ""
.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables .ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
# stage: deploy-gce-special # stage: deploy-gce-special
@ -317,6 +331,7 @@ before_script:
CLOUD_REGION: us-central1-b CLOUD_REGION: us-central1-b
CLUSTER_MODE: separate CLUSTER_MODE: separate
IDEMPOT_CHECK: "false" IDEMPOT_CHECK: "false"
STARTUP_SCRIPT: ""
.centos7_calico_ha_variables: &centos7_calico_ha_variables .centos7_calico_ha_variables: &centos7_calico_ha_variables
# stage: deploy-gce-special # stage: deploy-gce-special
@ -327,6 +342,7 @@ before_script:
CLOUD_REGION: europe-west1-b CLOUD_REGION: europe-west1-b
CLUSTER_MODE: ha-scale CLUSTER_MODE: ha-scale
IDEMPOT_CHECK: "true" IDEMPOT_CHECK: "true"
STARTUP_SCRIPT: ""
.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables .coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
# stage: deploy-gce-special # stage: deploy-gce-special
@ -336,6 +352,7 @@ before_script:
CLUSTER_MODE: ha-scale CLUSTER_MODE: ha-scale
BOOTSTRAP_OS: coreos BOOTSTRAP_OS: coreos
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12 RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables .ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
# stage: deploy-gce-part1 # stage: deploy-gce-part1
@ -345,6 +362,7 @@ before_script:
CLUSTER_MODE: separate CLUSTER_MODE: separate
ETCD_DEPLOYMENT: rkt ETCD_DEPLOYMENT: rkt
KUBELET_DEPLOYMENT: rkt KUBELET_DEPLOYMENT: rkt
STARTUP_SCRIPT: ""
.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables .ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
# stage: deploy-gce-part1 # stage: deploy-gce-part1
@ -353,6 +371,7 @@ before_script:
CLOUD_IMAGE: ubuntu-1604-xenial CLOUD_IMAGE: ubuntu-1604-xenial
CLOUD_REGION: us-central1-b CLOUD_REGION: us-central1-b
CLUSTER_MODE: separate CLUSTER_MODE: separate
STARTUP_SCRIPT: ""
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto) # Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
coreos-calico-sep: coreos-calico-sep:
@ -588,7 +607,7 @@ ci-authorized:
script: script:
- /bin/sh scripts/premoderator.sh - /bin/sh scripts/premoderator.sh
except: ['triggers', 'master'] except: ['triggers', 'master']
syntax-check: syntax-check:
<<: *job <<: *job
stage: unit-tests stage: unit-tests


@ -2,7 +2,7 @@
## Deploy a production ready kubernetes cluster ## Deploy a production ready kubernetes cluster
If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kargo**. If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kubespray**.
- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal** - Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
- **High available** cluster - **High available** cluster
@ -13,13 +13,13 @@ If you have questions, join us on the [kubernetes slack](https://slack.k8s.io),
To deploy the cluster you can use : To deploy the cluster you can use :
[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br> [**kubespray-cli**](https://github.com/kubespray/kubespray-cli) <br>
**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py) <br> **Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py) <br>
**vagrant** by simply running `vagrant up` (for tests purposes) <br> **vagrant** by simply running `vagrant up` (for tests purposes) <br>
* [Requirements](#requirements) * [Requirements](#requirements)
* [Kargo vs ...](docs/comparisons.md) * [Kubespray vs ...](docs/comparisons.md)
* [Getting started](docs/getting-started.md) * [Getting started](docs/getting-started.md)
* [Ansible inventory and tags](docs/ansible.md) * [Ansible inventory and tags](docs/ansible.md)
* [Integration with existing ansible repo](docs/integration.md) * [Integration with existing ansible repo](docs/integration.md)
@ -98,22 +98,22 @@ option to leverage built-in cloud provider networking instead.
See also [Network checker](docs/netcheck.md). See also [Network checker](docs/netcheck.md).
## Community docs and resources ## Community docs and resources
- [kubernetes.io/docs/getting-started-guides/kargo/](https://kubernetes.io/docs/getting-started-guides/kargo/) - [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/)
- [kargo, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr - [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty - [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
- [Deploy a Kubernets Cluster with Kargo (video)](https://www.youtube.com/watch?v=N9q51JgbWu8) - [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
## Tools and projects on top of Kargo ## Tools and projects on top of Kubespray
- [Digital Rebar](https://github.com/digitalrebar/digitalrebar) - [Digital Rebar](https://github.com/digitalrebar/digitalrebar)
- [Kargo-cli](https://github.com/kubespray/kargo-cli) - [Kubespray-cli](https://github.com/kubespray/kubespray-cli)
- [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer) - [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
- [Terraform Contrib](https://github.com/kubernetes-incubator/kargo/tree/master/contrib/terraform) - [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
## CI Tests ## CI Tests
![Gitlab Logo](https://s27.postimg.org/wmtaig1wz/gitlabci.png) ![Gitlab Logo](https://s27.postimg.org/wmtaig1wz/gitlabci.png)
[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/badges/master/build.svg)](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/pipelines) </br> [![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines) </br>
CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack). CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack).
See the [test matrix](docs/test_cases.md) for details. See the [test matrix](docs/test_cases.md) for details.


@ -1,16 +1,16 @@
# Release Process # Release Process
The Kargo Project is released on an as-needed basis. The process is as follows: The Kubespray Project is released on an as-needed basis. The process is as follows:
1. An issue is proposing a new release with a changelog since the last release 1. An issue is proposing a new release with a changelog since the last release
2. At least one of the [OWNERS](OWNERS) must LGTM this release 2. At least one of the [OWNERS](OWNERS) must LGTM this release
3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` 3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
4. The release issue is closed 4. The release issue is closed
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kargo $VERSION is released` 5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
## Major/minor releases, merge freezes and milestones ## Major/minor releases, merge freezes and milestones
* Kargo does not maintain stable branches for releases. Releases are tags, not * Kubespray does not maintain stable branches for releases. Releases are tags, not
branches, and there are no backports. Therefore, there is no need for merge branches, and there are no backports. Therefore, there is no need for merge
freezes as well. freezes as well.
@ -20,21 +20,21 @@ The Kargo Project is released on an as-needed basis. The process is as follows:
support lifetime, which ends once the milestone closed. Then only a next major support lifetime, which ends once the milestone closed. Then only a next major
or minor release can be done. or minor release can be done.
* Kargo major and minor releases are bound to the given ``kube_version`` major/minor * Kubespray major and minor releases are bound to the given ``kube_version`` major/minor
version numbers and other components' arbitrary versions, like etcd or network plugins. version numbers and other components' arbitrary versions, like etcd or network plugins.
Older or newer versions are not supported and not tested for the given release. Older or newer versions are not supported and not tested for the given release.
* There is no unstable releases and no APIs, thus Kargo doesn't follow * There is no unstable releases and no APIs, thus Kubespray doesn't follow
[semver](http://semver.org/). Every version describes only a stable release. [semver](http://semver.org/). Every version describes only a stable release.
Breaking changes, if any introduced by changed defaults or non-contrib ansible roles' Breaking changes, if any introduced by changed defaults or non-contrib ansible roles'
playbooks, shall be described in the release notes. Other breaking changes, if any in playbooks, shall be described in the release notes. Other breaking changes, if any in
the contributed addons or bound versions of Kubernetes and other components, are the contributed addons or bound versions of Kubernetes and other components, are
considered out of Kargo scope and are up to the components' teams to deal with and considered out of Kubespray scope and are up to the components' teams to deal with and
document. document.
* Minor releases can change components' versions, but not the major ``kube_version``. * Minor releases can change components' versions, but not the major ``kube_version``.
Greater ``kube_version`` requires a new major or minor release. For example, if Kargo v2.0.0 Greater ``kube_version`` requires a new major or minor release. For example, if Kubespray v2.0.0
is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``, is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``,
then Kargo v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1 then Kubespray v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
and *any* changes to other components, like etcd v4, or calico 1.2.3. and *any* changes to other components, like etcd v4, or calico 1.2.3.
And Kargo v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively. And Kubespray v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.

Vagrantfile

@ -13,7 +13,7 @@ SUPPORTED_OS = {
"coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]}, "coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
"coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]}, "coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
"coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]}, "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
"ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "ubuntu"}, "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
} }
# Defaults for config options defined in CONFIG # Defaults for config options defined in CONFIG
@ -100,6 +100,10 @@ Vagrant.configure("2") do |config|
end end
end end
$shared_folders.each do |src, dst|
config.vm.synced_folder src, dst
end
config.vm.provider :virtualbox do |vb| config.vm.provider :virtualbox do |vb|
vb.gui = $vm_gui vb.gui = $vm_gui
vb.memory = $vm_memory vb.memory = $vm_memory


@ -2,7 +2,7 @@
- hosts: localhost - hosts: localhost
gather_facts: False gather_facts: False
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]} - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- hosts: k8s-cluster:etcd:calico-rr - hosts: k8s-cluster:etcd:calico-rr
@ -13,7 +13,7 @@
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled. # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
ansible_ssh_pipelining: false ansible_ssh_pipelining: false
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: bootstrap-os, tags: bootstrap-os} - { role: bootstrap-os, tags: bootstrap-os}
- hosts: k8s-cluster:etcd:calico-rr - hosts: k8s-cluster:etcd:calico-rr
@ -25,7 +25,7 @@
- hosts: k8s-cluster:etcd:calico-rr - hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade } - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
- { role: kubernetes/preinstall, tags: preinstall } - { role: kubernetes/preinstall, tags: preinstall }
- { role: docker, tags: docker } - { role: docker, tags: docker }
@ -36,38 +36,38 @@
- hosts: etcd:k8s-cluster:vault - hosts: etcd:k8s-cluster:vault
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults, when: "cert_management == 'vault'" } - { role: kubespray-defaults, when: "cert_management == 'vault'" }
- { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" } - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
- hosts: etcd - hosts: etcd
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: etcd, tags: etcd, etcd_cluster_setup: true } - { role: etcd, tags: etcd, etcd_cluster_setup: true }
- hosts: k8s-cluster - hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: etcd, tags: etcd, etcd_cluster_setup: false } - { role: etcd, tags: etcd, etcd_cluster_setup: false }
- hosts: etcd:k8s-cluster:vault - hosts: etcd:k8s-cluster:vault
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: vault, tags: vault, when: "cert_management == 'vault'"} - { role: vault, tags: vault, when: "cert_management == 'vault'"}
- hosts: k8s-cluster - hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: kubernetes/node, tags: node } - { role: kubernetes/node, tags: node }
- { role: network_plugin, tags: network } - { role: network_plugin, tags: network }
- hosts: kube-master - hosts: kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: kubernetes/master, tags: master } - { role: kubernetes/master, tags: master }
- { role: kubernetes-apps/network_plugin, tags: network } - { role: kubernetes-apps/network_plugin, tags: network }
- { role: kubernetes-apps/policy_controller, tags: policy-controller } - { role: kubernetes-apps/policy_controller, tags: policy-controller }
@ -75,18 +75,18 @@
- hosts: calico-rr - hosts: calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: network_plugin/calico/rr, tags: network } - { role: network_plugin/calico/rr, tags: network }
- hosts: k8s-cluster - hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq } - { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf } - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
- hosts: kube-master[0] - hosts: kube-master[0]
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: kubernetes-apps, tags: apps } - { role: kubernetes-apps, tags: apps }


@ -33,10 +33,10 @@ class SearchEC2Tags(object):
hosts = {} hosts = {}
hosts['_meta'] = { 'hostvars': {} } hosts['_meta'] = { 'hostvars': {} }
##Search ec2 three times to find nodes of each group type. Relies on kargo-role key/value. ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
for group in ["kube-master", "kube-node", "etcd"]: for group in ["kube-master", "kube-node", "etcd"]:
hosts[group] = [] hosts[group] = []
tag_key = "kargo-role" tag_key = "kubespray-role"
tag_value = ["*"+group+"*"] tag_value = ["*"+group+"*"]
region = os.environ['REGION'] region = os.environ['REGION']


@ -5,7 +5,7 @@ Provision the base infrastructure for a Kubernetes cluster by using [Azure Resou
## Status ## Status
This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
Resource Group. It will not install Kubernetes itself, this has to be done in a later step by yourself (using kargo of course). Resource Group. It will not install Kubernetes itself, this has to be done in a later step by yourself (using kubespray of course).
## Requirements ## Requirements
@ -47,7 +47,7 @@ $ ./clear-rg.sh <resource_group_name>
**WARNING** this really deletes everything from your resource group, including everything that was later created by you! **WARNING** this really deletes everything from your resource group, including everything that was later created by you!
## Generating an inventory for kargo ## Generating an inventory for kubespray
After you have applied the templates, you can generate an inventory with this call: After you have applied the templates, you can generate an inventory with this call:
@ -55,10 +55,10 @@ After you have applied the templates, you can generate an inventory with this ca
$ ./generate-inventory.sh <resource_group_name> $ ./generate-inventory.sh <resource_group_name>
``` ```
It will create the file ./inventory which can then be used with kargo, e.g.: It will create the file ./inventory which can then be used with kubespray, e.g.:
```shell ```shell
$ cd kargo-root-dir $ cd kubespray-root-dir
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml $ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
``` ```


@ -65,7 +65,7 @@ HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
# Configurable as shell vars end # Configurable as shell vars end
class KargoInventory(object): class KubesprayInventory(object):
def __init__(self, changed_hosts=None, config_file=None): def __init__(self, changed_hosts=None, config_file=None):
self.config = configparser.ConfigParser(allow_no_value=True, self.config = configparser.ConfigParser(allow_no_value=True,
@ -337,7 +337,7 @@ MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
def main(argv=None): def main(argv=None):
if not argv: if not argv:
argv = sys.argv[1:] argv = sys.argv[1:]
KargoInventory(argv, CONFIG_FILE) KubesprayInventory(argv, CONFIG_FILE)
if __name__ == "__main__": if __name__ == "__main__":
sys.exit(main()) sys.exit(main())


@ -1,3 +1,3 @@
[metadata] [metadata]
name = kargo-inventory-builder name = kubespray-inventory-builder
version = 0.1 version = 0.1


@ -31,7 +31,7 @@ class TestInventory(unittest.TestCase):
sys_mock.exit = mock.Mock() sys_mock.exit = mock.Mock()
super(TestInventory, self).setUp() super(TestInventory, self).setUp()
self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4'] self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
self.inv = inventory.KargoInventory() self.inv = inventory.KubesprayInventory()
def test_get_ip_from_opts(self): def test_get_ip_from_opts(self):
optstring = "ansible_host=10.90.3.2 ip=10.90.3.2" optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"


@ -1,11 +1,11 @@
# Kargo on KVM Virtual Machines hypervisor preparation # Kubespray on KVM Virtual Machines hypervisor preparation
A simple playbook to ensure your system has the right settings to enable Kargo A simple playbook to ensure your system has the right settings to enable Kubespray
deployment on VMs. deployment on VMs.
This playbook does not create Virtual Machines, nor does it run Kargo itself. This playbook does not create Virtual Machines, nor does it run Kubespray itself.
### User creation ### User creation
If you want to create a user for running Kargo deployment, you should specify If you want to create a user for running Kubespray deployment, you should specify
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`. both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
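As a purely illustrative sketch, these two variables could be set like this (the user name and key path are just the placeholder values from the commented vars file below, not recommendations):

```yaml
# Sketch: enable deployment-user creation for the KVM preparation playbook.
# Values mirror the commented defaults shipped with the playbook.
k8s_deployment_user: kubespray
k8s_deployment_user_pkey_path: /tmp/ssh_rsa
```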


@ -1,3 +1,3 @@
#k8s_deployment_user: kargo #k8s_deployment_user: kubespray
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa #k8s_deployment_user_pkey_path: /tmp/ssh_rsa


@ -12,9 +12,9 @@
line: 'br_netfilter' line: 'br_netfilter'
when: br_netfilter is defined and ansible_os_family == 'Debian' when: br_netfilter is defined and ansible_os_family == 'Debian'
- name: Add br_netfilter into /etc/modules-load.d/kargo.conf - name: Add br_netfilter into /etc/modules-load.d/kubespray.conf
copy: copy:
dest: /etc/modules-load.d/kargo.conf dest: /etc/modules-load.d/kubespray.conf
content: |- content: |-
### This file is managed by Ansible ### This file is managed by Ansible
br-netfilter br-netfilter


@ -1,4 +1,4 @@
# Deploying a Kargo Kubernetes Cluster with GlusterFS # Deploying a Kubespray Kubernetes Cluster with GlusterFS
You can either deploy using Ansible on its own by supplying your own inventory file or by using Terraform to create the VMs and then providing a dynamic inventory to Ansible. The following two sections are self-contained, you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built ansible inventory, you can neglect the **Using Terraform and Ansible** section. You can either deploy using Ansible on its own by supplying your own inventory file or by using Terraform to create the VMs and then providing a dynamic inventory to Ansible. The following two sections are self-contained, you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built ansible inventory, you can neglect the **Using Terraform and Ansible** section.
@ -6,7 +6,7 @@ You can either deploy using Ansible on its own by supplying your own inventory f
In the same directory of this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, additionally to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and we add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group. In the same directory of this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, additionally to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and we add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then execute change to the kargo root folder, and execute (supposing that the machines are all using ubuntu): Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then execute change to the kubespray root folder, and execute (supposing that the machines are all using ubuntu):
``` ```
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
@ -28,7 +28,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us
## Using Terraform and Ansible ## Using Terraform and Ansible
First step is to fill in a `my-kargo-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like: First step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:
``` ```
cluster_name = "cluster1" cluster_name = "cluster1"
@ -65,15 +65,15 @@ $ echo Setting up Terraform creds && \
export TF_VAR_auth_url=${OS_AUTH_URL} export TF_VAR_auth_url=${OS_AUTH_URL}
``` ```
Then, standing on the kargo directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster: Then, standing on the kubespray directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:
``` ```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
``` ```
This will create both your Kubernetes and Gluster VMs. Make sure that the ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any ansible variable that you want to setup (like, for instance, the type of machine for bootstrapping). This will create both your Kubernetes and Gluster VMs. Make sure that the ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any ansible variable that you want to setup (like, for instance, the type of machine for bootstrapping).
Then, provision your Kubernetes (Kargo) cluster with the following ansible call: Then, provision your Kubernetes (kubespray) cluster with the following ansible call:
``` ```
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
@ -88,5 +88,5 @@ ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./co
If you need to destroy the cluster, you can run: If you need to destroy the cluster, you can run:
``` ```
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
``` ```


@ -33,7 +33,7 @@ export AWS_DEFAULT_REGION="zzz"
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory` - Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
- Once the infrastructure is created, you can run the kargo playbooks and supply inventory/hosts with the `-i` flag. - Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.
**Troubleshooting** **Troubleshooting**
@ -54,4 +54,4 @@ It could happen that Terraform doesnt create an Ansible Inventory file automatic
Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones. Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones.
![AWS Infrastructure with Terraform ](docs/aws_kargo.png) ![AWS Infrastructure with Terraform ](docs/aws_kubespray.png)


@ -157,7 +157,7 @@ resource "aws_instance" "k8s-worker" {
/* /*
* Create Kargo Inventory File * Create Kubespray Inventory File
* *
*/ */
data "template_file" "inventory" { data "template_file" "inventory" {



@ -75,25 +75,25 @@ According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variab
those cannot be overridden from the group vars. In order to override, one should use those cannot be overridden from the group vars. In order to override, one should use
the `-e ` runtime flags (most simple way) or other layers described in the docs. the `-e ` runtime flags (most simple way) or other layers described in the docs.
Kargo uses only a few layers to override things (or expect them to Kubespray uses only a few layers to override things (or expect them to
be overridden for roles): be overridden for roles):
Layer | Comment Layer | Comment
------|-------- ------|--------
**role defaults** | provides best UX to override things for Kargo deployments **role defaults** | provides best UX to override things for Kubespray deployments
inventory vars | Unused inventory vars | Unused
**inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things **inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things
inventory host_vars | Unused inventory host_vars | Unused
playbook group_vars | Unuses playbook group_vars | Unused
playbook host_vars | Unused playbook host_vars | Unused
**host facts** | Kargo overrides for internal roles' logic, like state flags **host facts** | Kubespray overrides for internal roles' logic, like state flags
play vars | Unused play vars | Unused
play vars_prompt | Unused play vars_prompt | Unused
play vars_files | Unused play vars_files | Unused
registered vars | Unused registered vars | Unused
set_facts | Kargo overrides those, for some places set_facts | Kubespray overrides those, for some places
**role and include vars** | Provides bad UX to override things! Use extra vars to enforce **role and include vars** | Provides bad UX to override things! Use extra vars to enforce
block vars (only for tasks in block) | Kargo overrides for internal roles' logic block vars (only for tasks in block) | Kubespray overrides for internal roles' logic
task vars (only for the task) | Unused for roles, but only for helper scripts task vars (only for the task) | Unused for roles, but only for helper scripts
**extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml`` **extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml``
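To make the ``-e @foo.yml`` row concrete, here is a hedged sketch of such an extra-vars file; ``foo.yml`` is only the placeholder name from the table, and the variables are ones that appear elsewhere in this changeset:

```yaml
# foo.yml -- extra vars win over every other layer listed above.
# Apply with: ansible-playbook -e @foo.yml cluster.yml
kube_network_plugin: calico
resolvconf_mode: host_resolvconf
kubedns_min_replicas: 1
```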
@ -124,12 +124,12 @@ The following tags are defined in playbooks:
| k8s-pre-upgrade | Upgrading K8s cluster | k8s-pre-upgrade | Upgrading K8s cluster
| k8s-secrets | Configuring K8s certs/keys | k8s-secrets | Configuring K8s certs/keys
| kpm | Installing K8s apps definitions with KPM | kpm | Installing K8s apps definitions with KPM
| kube-apiserver | Configuring self-hosted kube-apiserver | kube-apiserver | Configuring static pod kube-apiserver
| kube-controller-manager | Configuring self-hosted kube-controller-manager | kube-controller-manager | Configuring static pod kube-controller-manager
| kubectl | Installing kubectl and bash completion | kubectl | Installing kubectl and bash completion
| kubelet | Configuring kubelet service | kubelet | Configuring kubelet service
| kube-proxy | Configuring self-hosted kube-proxy | kube-proxy | Configuring static pod kube-proxy
| kube-scheduler | Configuring self-hosted kube-scheduler | kube-scheduler | Configuring static pod kube-scheduler
| localhost | Special steps for the localhost (ansible runner) | localhost | Special steps for the localhost (ansible runner)
| master | Configuring K8s master node role | master | Configuring K8s master node role
| netchecker | Installing netchecker K8s app | netchecker | Installing netchecker K8s app


@ -3,7 +3,7 @@ AWS
To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`. To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
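For reference, the resulting line in ``group_vars/all.yml`` is simply (a one-line sketch):

```yaml
# group_vars/all.yml -- enable the AWS cloud provider integration
cloud_provider: 'aws'
```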
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kargo/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role. Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`. The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
@ -45,12 +45,12 @@ This will produce an inventory that is passed into Ansible that looks like the f
Guide: Guide:
- Create instances in AWS as needed. - Create instances in AWS as needed.
- Either during or after creation, add tags to the instances with a key of `kargo-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd` - Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd`
- Copy the `kargo-aws-inventory.py` script from `kargo/contrib/aws_inventory` to the `kargo/inventory` directory. - Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
- Set the following AWS credentials and info as environment variables in your terminal: - Set the following AWS credentials and info as environment variables in your terminal:
``` ```
export AWS_ACCESS_KEY_ID="xxxxx" export AWS_ACCESS_KEY_ID="xxxxx"
export AWS_SECRET_ACCESS_KEY="yyyyy" export AWS_SECRET_ACCESS_KEY="yyyyy"
export REGION="us-east-2" export REGION="us-east-2"
``` ```
- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kargo-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml` - We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`


@ -96,7 +96,7 @@ You need to edit your inventory and add:
* `cluster_id` by route reflector node/group (see details * `cluster_id` by route reflector node/group (see details
[here](https://hub.docker.com/r/calico/routereflector/)) [here](https://hub.docker.com/r/calico/routereflector/))
Here's an example of Kargo inventory with route reflectors: Here's an example of Kubespray inventory with route reflectors:
``` ```
[all] [all]
@ -145,11 +145,11 @@ cluster_id="1.0.0.1"
The inventory above will deploy the following topology assuming that calico's The inventory above will deploy the following topology assuming that calico's
`global_as_num` is set to `65400`: `global_as_num` is set to `65400`:
![Image](figures/kargo-calico-rr.png?raw=true) ![Image](figures/kubespray-calico-rr.png?raw=true)
##### Optional : Define default endpoint to host action ##### Optional : Define default endpoint to host action
By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kargo) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) withing the same node are dropped. By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) withing the same node are dropped.
To re-define default action please set the following variable in your inventory: To re-define default action please set the following variable in your inventory:
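The variable itself falls just outside this hunk; as a sketch, the Calico role exposes it as ``calico_endpoint_to_host_action`` (name taken from Kubespray's Calico defaults, not shown here), so the inventory override would look roughly like:

```yaml
# group_vars sketch -- RETURN is the Kubespray default, ACCEPT is the
# permissive alternative mentioned above.
calico_endpoint_to_host_action: "ACCEPT"
```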


@ -3,17 +3,17 @@ Cloud providers
#### Provisioning #### Provisioning
You can use kargo-cli to start new instances on cloud providers You can use kubespray-cli to start new instances on cloud providers
here's an example here's an example
``` ```
kargo [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana kubespray [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
``` ```
#### Deploy kubernetes #### Deploy kubernetes
With kargo-cli With kubespray-cli
``` ```
kargo deploy [--aws|--gce] -u admin kubespray deploy [--aws|--gce] -u admin
``` ```
Or ansible-playbook command Or ansible-playbook command


@ -1,25 +1,25 @@
Kargo vs [Kops](https://github.com/kubernetes/kops) Kubespray vs [Kops](https://github.com/kubernetes/kops)
--------------- ---------------
Kargo runs on bare metal and most clouds, using Ansible as its substrate for Kubespray runs on bare metal and most clouds, using Ansible as its substrate for
provisioning and orchestration. Kops performs the provisioning and orchestration provisioning and orchestration. Kops performs the provisioning and orchestration
itself, and as such is less flexible in deployment platforms. For people with itself, and as such is less flexible in deployment platforms. For people with
familiarity with Ansible, existing Ansible deployments or the desire to run a familiarity with Ansible, existing Ansible deployments or the desire to run a
Kubernetes cluster across multiple platforms, Kargo is a good choice. Kops, Kubernetes cluster across multiple platforms, Kubespray is a good choice. Kops,
however, is more tightly integrated with the unique features of the clouds it however, is more tightly integrated with the unique features of the clouds it
supports so it could be a better choice if you know that you will only be using supports so it could be a better choice if you know that you will only be using
one platform for the foreseeable future. one platform for the foreseeable future.
Kargo vs [Kubeadm](https://github.com/kubernetes/kubeadm) Kubespray vs [Kubeadm](https://github.com/kubernetes/kubeadm)
------------------ ------------------
Kubeadm provides domain Knowledge of Kubernetes clusters' life cycle Kubeadm provides domain Knowledge of Kubernetes clusters' life cycle
management, including self-hosted layouts, dynamic discovery services and so management, including self-hosted layouts, dynamic discovery services and so
on. Had it belong to the new [operators world](https://coreos.com/blog/introducing-operators.html), on. Had it belong to the new [operators world](https://coreos.com/blog/introducing-operators.html),
it would've likely been named a "Kubernetes cluster operator". Kargo however, it would've likely been named a "Kubernetes cluster operator". Kubespray however,
does generic configuration management tasks from the "OS operators" ansible does generic configuration management tasks from the "OS operators" ansible
world, plus some initial K8s clustering (with networking plugins included) and world, plus some initial K8s clustering (with networking plugins included) and
control plane bootstrapping. Kargo [strives](https://github.com/kubernetes-incubator/kargo/issues/553) control plane bootstrapping. Kubespray [strives](https://github.com/kubernetes-incubator/kubespray/issues/553)
to adopt kubeadm as a tool in order to consume life cycle management domain to adopt kubeadm as a tool in order to consume life cycle management domain
knowledge from it and offload generic OS configuration things from it, which knowledge from it and offload generic OS configuration things from it, which
hopefully benefits both sides. hopefully benefits both sides.


@ -1,10 +1,10 @@
CoreOS bootstrap CoreOS bootstrap
=============== ===============
Example with **kargo-cli**: Example with **kubespray-cli**:
``` ```
kargo deploy --gce --coreos kubespray deploy --gce --coreos
``` ```
Or with Ansible: Or with Ansible:
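A hedged ansible-playbook equivalent, assuming the `core` SSH user that Container Linux images ship with and the `bootstrap_os` variable used elsewhere in this repo:
```
ansible-playbook -i inventory/inventory.cfg cluster.yml -b -u core -e bootstrap_os=coreos
```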

@ -1,7 +1,7 @@
K8s DNS stack by Kargo K8s DNS stack by Kubespray
====================== ======================
For K8s cluster nodes, kargo configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/) For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md) [cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
to serve as an authoritative DNS server for a given ``dns_domain`` and its to serve as an authoritative DNS server for a given ``dns_domain`` and its
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels). ``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
@ -44,13 +44,13 @@ DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode``
DNS servers in early cluster deployment when no cluster DNS is available yet. These are also added as upstream DNS servers in early cluster deployment when no cluster DNS is available yet. These are also added as upstream
DNS servers used by ``dnsmasq`` (when deployed with ``dns_mode: dnsmasq_kubedns``). DNS servers used by ``dnsmasq`` (when deployed with ``dns_mode: dnsmasq_kubedns``).
DNS modes supported by kargo DNS modes supported by Kubespray
============================ ============================
You can modify how kargo sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``. You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
## dns_mode ## dns_mode
``dns_mode`` configures how kargo will set up cluster DNS. There are three modes available: ``dns_mode`` configures how Kubespray will set up cluster DNS. There are three modes available:
#### dnsmasq_kubedns (default) #### dnsmasq_kubedns (default)
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
@ -67,7 +67,7 @@ This does not install any of dnsmasq and kubedns/skydns. This basically disables
leaves you with a non-functional cluster. leaves you with a non-functional cluster.
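For reference, a minimal group_vars sketch for picking the mode and the upstream servers mentioned above (values are illustrative; `dnsmasq_kubedns` is the default):
```
dns_mode: dnsmasq_kubedns
upstream_dns_servers:
  - 8.8.8.8
  - 8.8.4.4
```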
## resolvconf_mode ## resolvconf_mode
``resolvconf_mode`` configures how kargo will set up DNS for ``hostNetwork: true`` PODs and non-k8s containers. ``resolvconf_mode`` configures how Kubespray will set up DNS for ``hostNetwork: true`` PODs and non-k8s containers.
There are three modes available: There are three modes available:
#### docker_dns (default) #### docker_dns (default)
@ -100,7 +100,7 @@ used as a backup nameserver. After cluster DNS is running, all queries will be a
servers, which in turn will forward queries to the system nameserver if required. servers, which in turn will forward queries to the system nameserver if required.
#### host_resolvconf #### host_resolvconf
This activates the classic kargo behaviour that modifies the host's ``/etc/resolv.conf`` file and dhclient This activates the classic Kubespray behaviour that modifies the host's ``/etc/resolv.conf`` file and dhclient
configuration to point to the cluster dns server (either dnsmasq or kubedns, depending on dns_mode). configuration to point to the cluster dns server (either dnsmasq or kubedns, depending on dns_mode).
As cluster DNS is not available at the early deployment stage, this mode is split into 2 stages. In the first As cluster DNS is not available at the early deployment stage, this mode is split into 2 stages. In the first
@ -120,7 +120,7 @@ cluster service names.
Limitations Limitations
----------- -----------
* Kargo does not yet have a way to configure the Kubedns addon to forward requests that * Kubespray does not yet have a way to configure the Kubedns addon to forward requests that
SkyDns cannot answer with authority to arbitrary recursive resolvers. This task is left SkyDns cannot answer with authority to arbitrary recursive resolvers. This task is left
for the future. See [official SkyDns docs](https://github.com/skynetservices/skydns) for the future. See [official SkyDns docs](https://github.com/skynetservices/skydns)
for details. for details.

@ -1,7 +1,7 @@
Downloading binaries and containers Downloading binaries and containers
=================================== ===================================
Kargo supports several download/upload modes. The default is: Kubespray supports several download/upload modes. The default is:
* Each node downloads binaries and container images on its own, which is * Each node downloads binaries and container images on its own, which is
``download_run_once: False``. ``download_run_once: False``.

@ -1,21 +1,21 @@
Getting started Getting started
=============== ===============
The easiest way to run the deployment is to use the **kargo-cli** tool. The easiest way to run the deployment is to use the **kubespray-cli** tool.
Complete documentation can be found in its [GitHub repository](https://github.com/kubespray/kargo-cli). Complete documentation can be found in its [GitHub repository](https://github.com/kubespray/kubespray-cli).
Here is a simple example on AWS: Here is a simple example on AWS:
* Create instances and generate the inventory * Create instances and generate the inventory
``` ```
kargo aws --instances 3 kubespray aws --instances 3
``` ```
* Run the deployment * Run the deployment
``` ```
kargo deploy --aws -u centos -n calico kubespray deploy --aws -u centos -n calico
``` ```
Building your own inventory Building your own inventory
@ -23,12 +23,12 @@ Building your own inventory
Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
an example inventory located an example inventory located
[here](https://github.com/kubernetes-incubator/kargo/blob/master/inventory/inventory.example). [here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/inventory.example).
You can use an You can use an
[inventory generator](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py) [inventory generator](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py)
to create or modify an Ansible inventory. Currently, it is limited in to create or modify an Ansible inventory. Currently, it is limited in
functionality and is only used for making a basic Kargo cluster, but it does functionality and is only used for making a basic Kubespray cluster, but it does
support creating large clusters. It now supports support creating large clusters. It now supports
separating the ETCD and Kubernetes master roles from the node role if the size exceeds a separating the ETCD and Kubernetes master roles from the node role if the size exceeds a
certain threshold. Run inventory.py help for more information. certain threshold. Run inventory.py help for more information.
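As a rough illustration of the generator (assuming its `CONFIG_FILE` environment variable and a space-separated list of node IPs; `inventory.py help` has the authoritative syntax):
```
CONFIG_FILE=inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
```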

@ -22,7 +22,7 @@ Kube-apiserver
-------------- --------------
K8s components require a loadbalancer to access the apiservers via a reverse K8s components require a loadbalancer to access the apiservers via a reverse
proxy. Kargo includes support for an nginx-based proxy that resides on each proxy. Kubespray includes support for an nginx-based proxy that resides on each
non-master Kubernetes node. This is referred to as localhost loadbalancing. It non-master Kubernetes node. This is referred to as localhost loadbalancing. It
is less efficient than a dedicated load balancer because it creates extra is less efficient than a dedicated load balancer because it creates extra
health checks on the Kubernetes apiserver, but is more practical for scenarios health checks on the Kubernetes apiserver, but is more practical for scenarios
@ -30,12 +30,12 @@ where an external LB or virtual IP management is inconvenient. This option is
configured by the variable `loadbalancer_apiserver_localhost` (defaults to `True`). configured by the variable `loadbalancer_apiserver_localhost` (defaults to `True`).
You may also define the port the local internal loadbalancer uses by changing You may also define the port the local internal loadbalancer uses by changing
`nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`. `nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`.
It is also important to note that Kargo will only configure kubelet and kube-proxy It is also important to note that Kubespray will only configure kubelet and kube-proxy
on non-master nodes to use the local internal loadbalancer. on non-master nodes to use the local internal loadbalancer.
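A sketch of the relevant inventory variables, using the names from the paragraph above (the port value is only an example):
```
loadbalancer_apiserver_localhost: true
# defaults to kube_apiserver_port when unset
nginx_kube_apiserver_port: 8443
```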
If you choose to NOT use the local internal loadbalancer, you will need to configure If you choose to NOT use the local internal loadbalancer, you will need to configure
your own loadbalancer to achieve HA. Note that deploying a loadbalancer is up to your own loadbalancer to achieve HA. Note that deploying a loadbalancer is up to
a user and is not covered by ansible roles in Kargo. By default, it only configures a user and is not covered by ansible roles in Kubespray. By default, it only configures
a non-HA endpoint, which points to the `access_ip` or IP address of the first server a non-HA endpoint, which points to the `access_ip` or IP address of the first server
node in the `kube-master` group. It can also configure clients to use endpoints node in the `kube-master` group. It can also configure clients to use endpoints
for a given loadbalancer type. The following diagram shows how traffic to the for a given loadbalancer type. The following diagram shows how traffic to the

@ -1,7 +1,7 @@
Network Checker Application Network Checker Application
=========================== ===========================
With the ``deploy_netchecker`` var enabled (defaults to false), Kargo deploys a With the ``deploy_netchecker`` var enabled (defaults to false), Kubespray deploys a
Network Checker Application from the third-party `l23network/k8s-netchecker` docker Network Checker Application from the third-party `l23network/k8s-netchecker` docker
images. It consists of a server and agents that try to reach the server over the images. It consists of a server and agents that try to reach the server over the
network paths Kubernetes applications normally use. Therefore, this network paths Kubernetes applications normally use. Therefore, this
@ -17,7 +17,7 @@ any of the cluster nodes:
``` ```
curl http://localhost:31081/api/v1/connectivity_check curl http://localhost:31081/api/v1/connectivity_check
``` ```
Note that Kargo does not invoke the check but only deploys the application, if Note that Kubespray does not invoke the check but only deploys the application, if
requested. requested.
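Enabling it from group_vars is a one-liner (variable name as above; it defaults to false):
```
deploy_netchecker: true
```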
There are related application specific variables: There are related application specific variables:

@ -1,23 +1,23 @@
Kargo's roadmap Kubespray's roadmap
================= =================
### Kubeadm ### Kubeadm
- Propose kubeadm as an option in order to set up the kubernetes cluster. - Propose kubeadm as an option in order to set up the kubernetes cluster.
That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kargo/issues/553) That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kubespray/issues/553)
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kargo/issues/320) ### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)
- the playbook would install and configure docker/rkt and the etcd cluster - the playbook would install and configure docker/rkt and the etcd cluster
- the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars. - the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
- a "kubespray" container would be deployed (kargo-cli, ansible-playbook, kpm) - a "kubespray" container would be deployed (kubespray-cli, ansible-playbook, kpm)
- to be discussed, a way to provide the inventory - to be discussed, a way to provide the inventory
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kargo/issues/321) - **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321)
### Provisioning and cloud providers ### Provisioning and cloud providers
- [ ] Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure** - [ ] Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure**
- [ ] On AWS autoscaling, multi AZ - [ ] On AWS autoscaling, multi AZ
- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kargo/issues/297) - [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kubespray/issues/297)
- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kargo/issues/280) - [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kubespray/issues/280)
- [x] **TLS bootstrap** support for kubelet [#234](https://github.com/kubespray/kargo/issues/234) - [x] **TLS bootstrap** support for kubelet [#234](https://github.com/kubespray/kubespray/issues/234)
(related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br> (related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
https://github.com/kubernetes/kubernetes/issues/18112) https://github.com/kubernetes/kubernetes/issues/18112)
@ -37,14 +37,14 @@ That would probably improve deployment speed and certs management [#553](https:/
- [ ] test scale up cluster: +1 etcd, +1 master, +1 node - [ ] test scale up cluster: +1 etcd, +1 master, +1 node
### Lifecycle ### Lifecycle
- [ ] Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kargo/issues/553) - [ ] Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kubespray/issues/553)
- [x] Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154) - [x] Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kubespray/issues/154)
- [ ] Drain worker node when shutting down/deleting an instance - [ ] Drain worker node when shutting down/deleting an instance
- [ ] Upgrade granularity: select components to upgrade and skip others - [ ] Upgrade granularity: select components to upgrade and skip others
### Networking ### Networking
- [ ] romana.io support [#160](https://github.com/kubespray/kargo/issues/160) - [ ] romana.io support [#160](https://github.com/kubespray/kubespray/issues/160)
- [ ] Configure network policy for Calico. [#159](https://github.com/kubespray/kargo/issues/159) - [ ] Configure network policy for Calico. [#159](https://github.com/kubespray/kubespray/issues/159)
- [ ] Opencontrail - [ ] Opencontrail
- [x] Canal - [x] Canal
- [x] Cloud Provider native networking (instead of our network plugins) - [x] Cloud Provider native networking (instead of our network plugins)
@ -53,14 +53,14 @@ That would probably improve deployment speed and certs management [#553](https:/
- (to be discussed) option to set a loadbalancer for the apiservers like ucarp/pacemaker/keepalived - (to be discussed) option to set a loadbalancer for the apiservers like ucarp/pacemaker/keepalived
While waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kubernetes/kubernetes/issues/18174) to be fixed. While waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kubernetes/kubernetes/issues/18174) to be fixed.
### Kargo-cli ### Kubespray-cli
- Delete instances - Delete instances
- `kargo vagrant` to set up a test cluster locally - `kubespray vagrant` to set up a test cluster locally
- `kargo azure` for Microsoft Azure support - `kubespray azure` for Microsoft Azure support
- switch to Terraform instead of Ansible for provisioning - switch to Terraform instead of Ansible for provisioning
- update $HOME/.kube/config when a cluster is deployed. Optionally switch to this context - update $HOME/.kube/config when a cluster is deployed. Optionally switch to this context
### Kargo API ### Kubespray API
- Perform all actions through an **API** - Perform all actions through an **API**
- Store inventories / configurations of multiple clusters - Store inventories / configurations of multiple clusters
- make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory - make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory
@ -87,8 +87,8 @@ Include optionals deployments to init the cluster:
### Others ### Others
- remove nodes (adding is already supported) - remove nodes (adding is already supported)
- being able to choose any k8s version (almost done) - being able to choose any k8s version (almost done)
- **rkt** support [#59](https://github.com/kubespray/kargo/issues/59) - **rkt** support [#59](https://github.com/kubespray/kubespray/issues/59)
- Review documentation (split in categories) - Review documentation (split in categories)
- **consul** -> if officially supported by k8s - **consul** -> if officially supported by k8s
- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kargo/issues/312) - flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kubespray/issues/312)
- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kargo/issues/329) - Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kubespray/issues/329)

@ -1,11 +1,11 @@
Upgrading Kubernetes in Kargo Upgrading Kubernetes in Kubespray
============================= =============================
#### Description #### Description
Kargo handles upgrades the same way it handles initial deployment. That is to Kubespray handles upgrades the same way it handles initial deployment. That is to
say that each component is laid down in a fixed order. You should be able to say that each component is laid down in a fixed order. You should be able to
upgrade from Kargo tag 2.0 up to the current master without difficulty. You can upgrade from Kubespray tag 2.0 up to the current master without difficulty. You can
also individually control versions of components by explicitly defining their also individually control versions of components by explicitly defining their
versions. Here are all version vars for each component: versions. Here are all version vars for each component:
@ -35,7 +35,7 @@ ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.6
#### Graceful upgrade #### Graceful upgrade
Kargo also supports cordon, drain and uncordoning of nodes when performing Kubespray also supports cordon, drain and uncordoning of nodes when performing
a cluster upgrade. There is a separate playbook used for this purpose. It is a cluster upgrade. There is a separate playbook used for this purpose. It is
important to note that upgrade-cluster.yml can only be used for upgrading an important to note that upgrade-cluster.yml can only be used for upgrading an
existing cluster. That means there must be at least 1 kube-master already existing cluster. That means there must be at least 1 kube-master already

@ -1,4 +1,4 @@
Configurable Parameters in Kargo Configurable Parameters in Kubespray
================================ ================================
#### Generic Ansible variables #### Generic Ansible variables
@ -12,7 +12,7 @@ Some variables of note include:
* *ansible_default_ipv4.address*: IP address Ansible automatically chooses. * *ansible_default_ipv4.address*: IP address Ansible automatically chooses.
Generated based on the output from the command ``ip -4 route get 8.8.8.8`` Generated based on the output from the command ``ip -4 route get 8.8.8.8``
#### Common vars that are used in Kargo #### Common vars that are used in Kubespray
* *calico_version* - Specify version of Calico to use * *calico_version* - Specify version of Calico to use
* *calico_cni_version* - Specify version of Calico CNI plugin to use * *calico_cni_version* - Specify version of Calico CNI plugin to use
@ -35,16 +35,16 @@ Some variables of note include:
* *access_ip* - IP for other hosts to use to connect to. Often required when * *access_ip* - IP for other hosts to use to connect to. Often required when
deploying from a cloud, such as OpenStack or GCE and you have separate deploying from a cloud, such as OpenStack or GCE and you have separate
public/floating and private IPs. public/floating and private IPs.
* *ansible_default_ipv4.address* - Not Kargo-specific, but it is used if ip * *ansible_default_ipv4.address* - Not Kubespray-specific, but it is used if ip
and access_ip are undefined and access_ip are undefined
* *loadbalancer_apiserver* - If defined, all hosts will connect to this * *loadbalancer_apiserver* - If defined, all hosts will connect to this
address instead of localhost for kube-masters and kube-master[0] for address instead of localhost for kube-masters and kube-master[0] for
kube-nodes. See more details in the kube-nodes. See more details in the
[HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md). [HA guide](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ha-mode.md).
* *loadbalancer_apiserver_localhost* - makes all hosts connect to * *loadbalancer_apiserver_localhost* - makes all hosts connect to
the internally load-balanced apiserver endpoint. Mutually exclusive with the the internally load-balanced apiserver endpoint. Mutually exclusive with the
`loadbalancer_apiserver`. See more details in the `loadbalancer_apiserver`. See more details in the
[HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md). [HA guide](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ha-mode.md).
#### Cluster variables #### Cluster variables
@ -79,13 +79,13 @@ other settings from your existing /etc/resolv.conf are lost. Set the following
variables to match your requirements. variables to match your requirements.
* *upstream_dns_servers* - Array of upstream DNS servers configured on host in * *upstream_dns_servers* - Array of upstream DNS servers configured on host in
addition to Kargo deployed DNS addition to Kubespray deployed DNS
* *nameservers* - Array of DNS servers configured for use in dnsmasq * *nameservers* - Array of DNS servers configured for use in dnsmasq
* *searchdomains* - Array of up to 4 search domains * *searchdomains* - Array of up to 4 search domains
* *skip_dnsmasq* - Don't set up dnsmasq (use only KubeDNS) * *skip_dnsmasq* - Don't set up dnsmasq (use only KubeDNS)
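A hedged example combining these variables in group_vars (addresses and domains are placeholders):
```
upstream_dns_servers:
  - 8.8.8.8
  - 8.8.4.4
nameservers:
  - 10.0.0.2
searchdomains:
  - mycluster.local
skip_dnsmasq: false
```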
For more information, see [DNS For more information, see [DNS
Stack](https://github.com/kubernetes-incubator/kargo/blob/master/docs/dns-stack.md). Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-stack.md).
#### Other service variables #### Other service variables
@ -114,5 +114,5 @@ The possible vars are:
#### User accounts #### User accounts
Kargo sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their Kubespray sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their
passwords default to changeme. You can set this by changing ``kube_api_pwd``. passwords default to changeme. You can set this by changing ``kube_api_pwd``.
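For example (the value is a placeholder; pick your own secret):
```
kube_api_pwd: "change-me-to-something-strong"
```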

@ -39,7 +39,7 @@ vault group.
It is *highly* recommended that these secrets are removed from the servers after It is *highly* recommended that these secrets are removed from the servers after
your cluster has been deployed, and kept in a safe location of your choosing. your cluster has been deployed, and kept in a safe location of your choosing.
Naturally, the seriousness of the situation depends on what you're doing with Naturally, the seriousness of the situation depends on what you're doing with
your Kargo cluster, but with these secrets, an attacker will have the ability your Kubespray cluster, but with these secrets, an attacker will have the ability
to authenticate to almost everything in Kubernetes and decode all private to authenticate to almost everything in Kubernetes and decode all private
(HTTPS) traffic on your network signed by Vault certificates. (HTTPS) traffic on your network signed by Vault certificates.

@ -11,7 +11,7 @@
- hosts: localhost - hosts: localhost
gather_facts: False gather_facts: False
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]} - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- hosts: k8s-cluster:etcd:calico-rr - hosts: k8s-cluster:etcd:calico-rr
@ -22,7 +22,7 @@
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled. # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
ansible_ssh_pipelining: false ansible_ssh_pipelining: false
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: bootstrap-os, tags: bootstrap-os} - { role: bootstrap-os, tags: bootstrap-os}
- hosts: k8s-cluster:etcd:calico-rr - hosts: k8s-cluster:etcd:calico-rr
@ -34,7 +34,7 @@
- hosts: k8s-cluster:etcd:calico-rr - hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: kubernetes/preinstall, tags: preinstall } - { role: kubernetes/preinstall, tags: preinstall }
#Handle upgrades to master components first to maintain backwards compat. #Handle upgrades to master components first to maintain backwards compat.
@ -42,7 +42,7 @@
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: 1 serial: 1
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: upgrade/pre-upgrade, tags: pre-upgrade } - { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: kubernetes/node, tags: node } - { role: kubernetes/node, tags: node }
- { role: kubernetes/master, tags: master } - { role: kubernetes/master, tags: master }
@ -53,8 +53,8 @@
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: "{{ serial | default('20%') }}" serial: "{{ serial | default('20%') }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: upgrade/pre-upgrade, tags: pre-upgrade } - { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: kubernetes/node, tags: node } - { role: kubernetes/node, tags: node }
- { role: upgrade/post-upgrade, tags: post-upgrade } - { role: upgrade/post-upgrade, tags: post-upgrade }
- { role: kargo-defaults} - { role: kubespray-defaults}

@ -83,6 +83,9 @@ bin_dir: /usr/local/bin
## Please note that overlay2 is only supported on newer kernels ## Please note that overlay2 is only supported on newer kernels
#docker_storage_options: -s overlay2 #docker_storage_options: -s overlay2
# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
#docker_dns_servers_strict: false
## Default packages to install within the cluster, f.e: ## Default packages to install within the cluster, f.e:
#kpm_packages: #kpm_packages:
# - name: kube-system/grafana # - name: kube-system/grafana

@ -133,3 +133,8 @@ efk_enabled: false
# Helm deployment # Helm deployment
helm_enabled: false helm_enabled: false
# dnsmasq
# dnsmasq_upstream_dns_servers:
# - /resolvethiszone.with/10.0.4.250
# - 8.8.8.8

@ -66,7 +66,7 @@ options:
description: description:
- present handles checking existence or creating if definition file provided, - present handles checking existence or creating if definition file provided,
absent handles deleting resource(s) based on other options, absent handles deleting resource(s) based on other options,
latest handles creating ore updating based on existence, latest handles creating or updating based on existence,
reloaded handles updating resource(s) definition using definition file, reloaded handles updating resource(s) definition using definition file,
stopped handles stopping resource(s) based on other options. stopped handles stopping resource(s) based on other options.
requirements: requirements:

@ -14,5 +14,5 @@
when: reset_confirmation != "yes" when: reset_confirmation != "yes"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: reset, tags: reset } - { role: reset, tags: reset }

@ -2,3 +2,4 @@
pypy_version: 2.4.0 pypy_version: 2.4.0
pip_python_modules: pip_python_modules:
- httplib2 - httplib2
- six

@ -30,3 +30,6 @@ dns_memory_requests: 50Mi
# Autoscaler parameters # Autoscaler parameters
dnsmasq_nodes_per_replica: 10 dnsmasq_nodes_per_replica: 10
dnsmasq_min_replicas: 1 dnsmasq_min_replicas: 1
# Custom name servers
dnsmasq_upstream_dns_servers: []
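For reviewers: an override of this new default can mix dnsmasq's `/zone/server` syntax with plain resolvers, mirroring the commented group_vars example added earlier in this change:
```
dnsmasq_upstream_dns_servers:
  - /resolvethiszone.with/10.0.4.250
  - 8.8.8.8
```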

@ -11,6 +11,11 @@ server=/{{ dns_domain }}/{{ skydns_server }}
local=/{{ bogus_domains }} local=/{{ bogus_domains }}
#Set upstream dns servers #Set upstream dns servers
{% if dnsmasq_upstream_dns_servers|length > 0 %}
{% for srv in dnsmasq_upstream_dns_servers %}
server={{ srv }}
{% endfor %}
{% endif %}
{% if system_and_upstream_dns_servers|length > 0 %} {% if system_and_upstream_dns_servers|length > 0 %}
{% for srv in system_and_upstream_dns_servers %} {% for srv in system_and_upstream_dns_servers %}
server={{ srv }} server={{ srv }}

@ -19,7 +19,7 @@ spec:
labels: labels:
k8s-app: dnsmasq k8s-app: dnsmasq
kubernetes.io/cluster-service: "true" kubernetes.io/cluster-service: "true"
kargo/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}" kubespray/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}"
spec: spec:
containers: containers:
- name: dnsmasq - name: dnsmasq

@ -8,3 +8,5 @@ docker_repo_key_info:
docker_repo_info: docker_repo_info:
repos: repos:
docker_dns_servers_strict: yes

@ -52,8 +52,13 @@
- name: check number of nameservers - name: check number of nameservers
fail: fail:
msg: "Too many nameservers" msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=no and we will only use the first 3."
when: docker_dns_servers|length > 3 when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool
- name: rtrim number of nameservers to 3
set_fact:
docker_dns_servers: "{{ docker_dns_servers[0:3] }}"
when: docker_dns_servers|length > 3 and not docker_dns_servers_strict|bool
- name: check number of search domains - name: check number of search domains
fail: fail:

@ -52,9 +52,6 @@ calico_policy_image_repo: "calico/kube-policy-controller"
calico_policy_image_tag: "{{ calico_policy_version }}" calico_policy_image_tag: "{{ calico_policy_version }}"
calico_rr_image_repo: "quay.io/calico/routereflector" calico_rr_image_repo: "quay.io/calico/routereflector"
calico_rr_image_tag: "v0.3.0" calico_rr_image_tag: "v0.3.0"
exechealthz_version: 1.1
exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64"
exechealthz_image_tag: "{{ exechealthz_version }}"
hyperkube_image_repo: "quay.io/coreos/hyperkube" hyperkube_image_repo: "quay.io/coreos/hyperkube"
hyperkube_image_tag: "{{ kube_version }}_coreos.0" hyperkube_image_tag: "{{ kube_version }}_coreos.0"
pod_infra_image_repo: "gcr.io/google_containers/pause-amd64" pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
@ -74,12 +71,16 @@ nginx_image_tag: 1.11.4-alpine
dnsmasq_version: 2.72 dnsmasq_version: 2.72
dnsmasq_image_repo: "andyshinn/dnsmasq" dnsmasq_image_repo: "andyshinn/dnsmasq"
dnsmasq_image_tag: "{{ dnsmasq_version }}" dnsmasq_image_tag: "{{ dnsmasq_version }}"
kubednsmasq_version: 1.3 kubedns_version: 1.14.2
kubednsmasq_image_repo: "gcr.io/google_containers/kube-dnsmasq-amd64" kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
kubednsmasq_image_tag: "{{ kubednsmasq_version }}"
kubedns_version: 1.7
kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
kubedns_image_tag: "{{ kubedns_version }}" kubedns_image_tag: "{{ kubedns_version }}"
dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64"
dnsmasq_nanny_image_tag: "{{ kubedns_version }}"
dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64"
dnsmasq_sidecar_image_tag: "{{ kubedns_version }}"
kubednsautoscaler_version: 1.1.1
kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64"
kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
test_image_repo: busybox test_image_repo: busybox
test_image_tag: latest test_image_tag: latest
elasticsearch_version: "v2.4.1" elasticsearch_version: "v2.4.1"
@ -193,26 +194,31 @@ downloads:
repo: "{{ dnsmasq_image_repo }}" repo: "{{ dnsmasq_image_repo }}"
tag: "{{ dnsmasq_image_tag }}" tag: "{{ dnsmasq_image_tag }}"
sha256: "{{ dnsmasq_digest_checksum|default(None) }}" sha256: "{{ dnsmasq_digest_checksum|default(None) }}"
kubednsmasq:
container: true
repo: "{{ kubednsmasq_image_repo }}"
tag: "{{ kubednsmasq_image_tag }}"
sha256: "{{ kubednsmasq_digest_checksum|default(None) }}"
kubedns: kubedns:
container: true container: true
repo: "{{ kubedns_image_repo }}" repo: "{{ kubedns_image_repo }}"
tag: "{{ kubedns_image_tag }}" tag: "{{ kubedns_image_tag }}"
sha256: "{{ kubedns_digest_checksum|default(None) }}" sha256: "{{ kubedns_digest_checksum|default(None) }}"
dnsmasq_nanny:
container: true
repo: "{{ dnsmasq_nanny_image_repo }}"
tag: "{{ dnsmasq_nanny_image_tag }}"
sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}"
dnsmasq_sidecar:
container: true
repo: "{{ dnsmasq_sidecar_image_repo }}"
tag: "{{ dnsmasq_sidecar_image_tag }}"
sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}"
kubednsautoscaler:
container: true
repo: "{{ kubednsautoscaler_image_repo }}"
tag: "{{ kubednsautoscaler_image_tag }}"
sha256: "{{ kubednsautoscaler_digest_checksum|default(None) }}"
testbox: testbox:
container: true container: true
repo: "{{ test_image_repo }}" repo: "{{ test_image_repo }}"
tag: "{{ test_image_tag }}" tag: "{{ test_image_tag }}"
sha256: "{{ testbox_digest_checksum|default(None) }}" sha256: "{{ testbox_digest_checksum|default(None) }}"
exechealthz:
container: true
repo: "{{ exechealthz_image_repo }}"
tag: "{{ exechealthz_image_tag }}"
sha256: "{{ exechealthz_digest_checksum|default(None) }}"
elasticsearch: elasticsearch:
container: true container: true
repo: "{{ elasticsearch_image_repo }}" repo: "{{ elasticsearch_image_repo }}"

@ -2,6 +2,7 @@
# Set to false to only do certificate management # Set to false to only do certificate management
etcd_cluster_setup: true etcd_cluster_setup: true
etcd_backup_prefix: "/var/backups"
etcd_bin_dir: "{{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/" etcd_bin_dir: "{{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/"
etcd_data_dir: "/var/lib/etcd" etcd_data_dir: "/var/lib/etcd"

@ -3,7 +3,6 @@
command: /bin/true command: /bin/true
notify: notify:
- Refresh Time Fact - Refresh Time Fact
- Set etcd Backup Directory Prefix
- Set Backup Directory - Set Backup Directory
- Create Backup Directory - Create Backup Directory
- Backup etcd v2 data - Backup etcd v2 data
@ -13,10 +12,6 @@
- name: Refresh Time Fact - name: Refresh Time Fact
setup: filter=ansible_date_time setup: filter=ansible_date_time
- name: Set etcd Backup Directory Prefix
set_fact:
etcd_backup_prefix: '/var/backups'
- name: Set Backup Directory - name: Set Backup Directory
set_fact: set_fact:
etcd_backup_directory: "{{ etcd_backup_prefix }}/etcd-{{ ansible_date_time.date }}_{{ ansible_date_time.time }}" etcd_backup_directory: "{{ etcd_backup_prefix }}/etcd-{{ ansible_date_time.date }}_{{ ansible_date_time.time }}"

@ -1,5 +0,0 @@
- name: Configure defaults
debug:
msg: "Check roles/kargo-defaults/defaults/main.yml"
tags:
- always

@ -1,23 +1,23 @@
# Versions # Versions
kubedns_version: 1.9 kubedns_version: 1.14.2
kubednsmasq_version: 1.3 kubednsautoscaler_version: 1.1.1
exechealthz_version: 1.1
# Limits for dnsmasq/kubedns apps # Limits for dnsmasq/kubedns apps
dns_cpu_limit: 100m
dns_memory_limit: 170Mi dns_memory_limit: 170Mi
dns_cpu_requests: 70m dns_cpu_requests: 100m
dns_memory_requests: 50Mi dns_memory_requests: 70Mi
kubedns_min_replicas: 1 kubedns_min_replicas: 2
kubedns_nodes_per_replica: 10 kubedns_nodes_per_replica: 10
# Images # Images
kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64" kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
kubedns_image_tag: "{{ kubedns_version }}" kubedns_image_tag: "{{ kubedns_version }}"
kubednsmasq_image_repo: "gcr.io/google_containers/kube-dnsmasq-amd64" dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64"
kubednsmasq_image_tag: "{{ kubednsmasq_version }}" dnsmasq_nanny_image_tag: "{{ kubedns_version }}"
exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64" dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64"
exechealthz_image_tag: "{{ exechealthz_version }}" dnsmasq_sidecar_image_tag: "{{ kubedns_version }}"
kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64"
kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
# Netchecker # Netchecker
deploy_netchecker: false deploy_netchecker: false
@ -40,3 +40,4 @@ netchecker_server_memory_requests: 64M
# SSL # SSL
etcd_cert_dir: "/etc/ssl/etcd/ssl" etcd_cert_dir: "/etc/ssl/etcd/ssl"
canal_cert_dir: "/etc/canal/certs" canal_cert_dir: "/etc/canal/certs"
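A note on the new replica defaults: assuming the cluster-proportional-autoscaler's linear mode computes `replicas = max(ceil(nodes / nodesPerReplica), min)`, the values above work out roughly as:
```
# Illustrative sizing with kubedns_nodes_per_replica: 10, kubedns_min_replicas: 2
#   5 nodes  -> max(ceil(5/10), 2)  = 2 kube-dns replicas
#  25 nodes  -> max(ceil(25/10), 2) = 3 kube-dns replicas
```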

@ -1,7 +1,7 @@
--- ---
- name: Kubernetes Apps | Wait for kube-apiserver - name: Kubernetes Apps | Wait for kube-apiserver
uri: uri:
url: http://localhost:8080/healthz url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz
register: result register: result
until: result.status == 200 until: result.status == 200
retries: 10 retries: 10
@ -13,8 +13,8 @@
src: "{{item.file}}" src: "{{item.file}}"
dest: "{{kube_config_dir}}/{{item.file}}" dest: "{{kube_config_dir}}/{{item.file}}"
with_items: with_items:
- {name: kubedns, file: kubedns-deploy.yml, type: deployment} - {name: kube-dns, file: kubedns-deploy.yml, type: deployment}
- {name: kubedns, file: kubedns-svc.yml, type: svc} - {name: kube-dns, file: kubedns-svc.yml, type: svc}
- {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment} - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment}
register: manifests register: manifests
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]

@ -32,7 +32,7 @@ spec:
spec: spec:
containers: containers:
- name: autoscaler - name: autoscaler
image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1 image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}"
resources: resources:
requests: requests:
cpu: "20m" cpu: "20m"
@ -42,7 +42,7 @@ spec:
- --namespace=kube-system - --namespace=kube-system
- --configmap=kubedns-autoscaler - --configmap=kubedns-autoscaler
# Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
- --target=Deployment/kubedns - --target=Deployment/kube-dns
- --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}} - --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
- --logtostderr=true - --logtostderr=true
- --v=2 - --v=2

@ -1,25 +1,39 @@
apiVersion: extensions/v1beta1 apiVersion: extensions/v1beta1
kind: Deployment kind: Deployment
metadata: metadata:
name: kubedns name: kube-dns
namespace: {{ system_namespace }} namespace: "{{system_namespace}}"
labels: labels:
k8s-app: kubedns k8s-app: kube-dns
version: v19
kubernetes.io/cluster-service: "true" kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec: spec:
replicas: {{ kubedns_min_replicas }} # replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
selector: selector:
matchLabels: matchLabels:
k8s-app: kubedns k8s-app: kube-dns
version: v19
template: template:
metadata: metadata:
labels: labels:
k8s-app: kubedns k8s-app: kube-dns
version: v19 annotations:
kubernetes.io/cluster-service: "true" scheduler.alpha.kubernetes.io/critical-pod: ''
spec: spec:
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
containers: containers:
- name: kubedns - name: kubedns
image: "{{ kubedns_image_repo }}:{{ kubedns_image_tag }}" image: "{{ kubedns_image_repo }}:{{ kubedns_image_tag }}"
@ -30,15 +44,14 @@ spec:
# guaranteed class. Currently, this container falls into the # guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it. # "burstable" category so the kubelet doesn't backoff from restarting it.
limits: limits:
cpu: {{ dns_cpu_limit }}
memory: {{ dns_memory_limit }} memory: {{ dns_memory_limit }}
requests: requests:
cpu: {{ dns_cpu_requests }} cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }} memory: {{ dns_memory_requests }}
livenessProbe: livenessProbe:
httpGet: httpGet:
path: /healthz path: /healthcheck/kubedns
port: 8080 port: 10054
scheme: HTTP scheme: HTTP
initialDelaySeconds: 60 initialDelaySeconds: 60
timeoutSeconds: 5 timeoutSeconds: 5
@ -51,13 +64,16 @@ spec:
scheme: HTTP scheme: HTTP
# we poll on pod startup for the Kubernetes master service and # we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available. # only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 30 initialDelaySeconds: 3
timeoutSeconds: 5 timeoutSeconds: 5
args: args:
# command = "/kube-dns"
- --domain={{ dns_domain }}. - --domain={{ dns_domain }}.
- --dns-port=10053 - --dns-port=10053
- --config-dir=/kube-dns-config
- --v={{ kube_log_level }} - --v={{ kube_log_level }}
env:
- name: PROMETHEUS_PORT
value: "10055"
ports: ports:
- containerPort: 10053 - containerPort: 10053
name: dns-local name: dns-local
@ -65,25 +81,36 @@ spec:
- containerPort: 10053 - containerPort: 10053
name: dns-tcp-local name: dns-tcp-local
protocol: TCP protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq - name: dnsmasq
image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}" image: "{{ dnsmasq_nanny_image_repo }}:{{ dnsmasq_nanny_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }} imagePullPolicy: {{ k8s_image_pull_policy }}
resources: livenessProbe:
limits: httpGet:
cpu: {{ dns_cpu_limit }} path: /healthcheck/dnsmasq
memory: {{ dns_memory_limit }} port: 10054
requests: scheme: HTTP
cpu: {{ dns_cpu_requests }} initialDelaySeconds: 60
memory: {{ dns_memory_requests }} timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args: args:
- --log-facility=- - -v={{ kube_log_level }}
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000 - --cache-size=1000
- --no-resolv - --log-facility=-
- --server=127.0.0.1#10053 - --server=/{{ dns_domain }}/127.0.0.1#10053
{% if kube_log_level == '4' %} - --server=/in-addr.arpa/127.0.0.1#10053
- --log-queries - --server=/ip6.arpa/127.0.0.1#10053
{% endif %}
- --local=/{{ bogus_domains }}
ports: ports:
- containerPort: 53 - containerPort: 53
name: dns name: dns
@ -91,26 +118,37 @@ spec:
- containerPort: 53 - containerPort: 53
name: dns-tcp name: dns-tcp
protocol: TCP protocol: TCP
- name: healthz # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
image: "{{ exechealthz_image_repo }}:{{ exechealthz_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
resources: resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 10m
memory: 50Mi
requests: requests:
cpu: 10m cpu: 150m
# Note that this container shouldn't really need 50Mi of memory. The memory: 20Mi
# limits are set higher than expected pending investigation on #29688. volumeMounts:
# The extra memory was stolen from the kubedns container to keep the - name: kube-dns-config
# net memory requested by the pod constant. mountPath: /etc/k8s/dns/dnsmasq-nanny
memory: 50Mi - name: sidecar
image: "{{ dnsmasq_sidecar_image_repo }}:{{ dnsmasq_sidecar_image_tag }}"
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args: args:
- -cmd=nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1:10053 >/dev/null - --v={{ kube_log_level }}
- -port=8080 - --logtostderr
- -quiet - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ dns_domain }},5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ dns_domain }},5,A
ports: ports:
- containerPort: 8080 - containerPort: 10054
name: metrics
protocol: TCP protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS. dnsPolicy: Default # Don't use cluster DNS.

@ -1,15 +1,16 @@
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
metadata: metadata:
name: kubedns name: kube-dns
namespace: {{ system_namespace }} namespace: {{ system_namespace }}
labels: labels:
k8s-app: kubedns k8s-app: kube-dns
kubernetes.io/cluster-service: "true" kubernetes.io/cluster-service: "true"
kubernetes.io/name: "kubedns" addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec: spec:
selector: selector:
k8s-app: kubedns k8s-app: kube-dns
clusterIP: {{ skydns_server }} clusterIP: {{ skydns_server }}
ports: ports:
- name: dns - name: dns
@ -18,3 +19,4 @@ spec:
- name: dns-tcp - name: dns-tcp
port: 53 port: 53
protocol: TCP protocol: TCP

@ -39,7 +39,7 @@
- name: Master | wait for the apiserver to be running - name: Master | wait for the apiserver to be running
uri: uri:
url: http://localhost:8080/healthz url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz
register: result register: result
until: result.status == 200 until: result.status == 200
retries: 20 retries: 20

@ -5,7 +5,7 @@ metadata:
namespace: {{system_namespace}} namespace: {{system_namespace}}
labels: labels:
k8s-app: kube-apiserver k8s-app: kube-apiserver
kargo: v2 kubespray: v2
spec: spec:
hostNetwork: true hostNetwork: true
{% if kube_version | version_compare('v1.6', '>=') %} {% if kube_version | version_compare('v1.6', '>=') %}
@ -92,7 +92,7 @@ spec:
httpGet: httpGet:
host: 127.0.0.1 host: 127.0.0.1
path: /healthz path: /healthz
port: 8080 port: {{ kube_apiserver_insecure_port }}
initialDelaySeconds: 30 initialDelaySeconds: 30
timeoutSeconds: 10 timeoutSeconds: 10
volumeMounts: volumeMounts:
@ -124,4 +124,4 @@ spec:
- hostPath: - hostPath:
path: /etc/ssl/certs/ca-bundle.crt path: /etc/ssl/certs/ca-bundle.crt
name: rhel-ca-bundle name: rhel-ca-bundle
{% endif %} {% endif %}

@ -22,12 +22,15 @@ dependencies:
file: "{{ downloads.netcheck_agent }}" file: "{{ downloads.netcheck_agent }}"
when: deploy_netchecker when: deploy_netchecker
tags: [download, netchecker] tags: [download, netchecker]
- role: download
file: "{{ downloads.kubednsmasq }}"
tags: [download, dnsmasq]
- role: download - role: download
file: "{{ downloads.kubedns }}" file: "{{ downloads.kubedns }}"
tags: [download, dnsmasq] tags: [download, dnsmasq]
- role: download - role: download
file: "{{ downloads.exechealthz }}" file: "{{ downloads.dnsmasq_nanny }}"
tags: [download, dnsmasq] tags: [download, dnsmasq]
- role: download
file: "{{ downloads.dnsmasq_sidecar }}"
tags: [download, dnsmasq]
- role: download
file: "{{ downloads.kubednsautoscaler }}"
tags: [download, dnsmasq]

@ -32,7 +32,7 @@ openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
# For the vsphere integration, kubelet will need credentials to access # For the vsphere integration, kubelet will need credentials to access
# vsphere apis # vsphere apis
# Documentation regarting these values can be found # Documentation regarding these values can be found
# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105 # https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}" vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}" vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
@ -49,3 +49,6 @@ vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('')
# Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content # Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
# for hostnet pods and infra needs # for hostnet pods and infra needs
resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
# All inventory hostnames will be written into each /etc/hosts file.
populate_inventory_to_hosts_file: true

@ -1,9 +1,9 @@
--- ---
# These tasks will undo changes done by kargo in the past if needed (e.g. when upgrading from kargo 2.0.x # These tasks will undo changes done by kubespray in the past if needed (e.g. when upgrading from kubespray 2.0.x
# or when changing resolvconf_mode) # or when changing resolvconf_mode)
- name: Remove kargo specific config from dhclient config - name: Remove kubespray specific config from dhclient config
blockinfile: blockinfile:
dest: "{{dhclientconffile}}" dest: "{{dhclientconffile}}"
state: absent state: absent
@ -13,7 +13,7 @@
when: dhclientconffile is defined when: dhclientconffile is defined
notify: Preinstall | restart network notify: Preinstall | restart network
- name: Remove kargo specific dhclient hook - name: Remove kubespray specific dhclient hook
file: file:
path: "{{ dhclienthookfile }}" path: "{{ dhclienthookfile }}"
state: absent state: absent

@ -9,6 +9,7 @@
create: yes create: yes
backup: yes backup: yes
marker: "# Ansible inventory hosts {mark}" marker: "# Ansible inventory hosts {mark}"
when: populate_inventory_to_hosts_file
- name: Hosts | populate kubernetes loadbalancer address into hosts file - name: Hosts | populate kubernetes loadbalancer address into hosts file
lineinfile: lineinfile:

@ -0,0 +1,5 @@
- name: Configure defaults
debug:
msg: "Check roles/kubespray-defaults/defaults/main.yml"
tags:
- always

@ -4,6 +4,7 @@ nat_outgoing: true
# Use IP-over-IP encapsulation across hosts # Use IP-over-IP encapsulation across hosts
ipip: true ipip: true
ipip_mode: always # change to "cross-subnet" if you only want ipip encapsulation on traffic going across subnets
# Set to true if you want your calico cni binaries to overwrite the # Set to true if you want your calico cni binaries to overwrite the
# ones from hyperkube while leaving other cni plugins intact. # ones from hyperkube while leaving other cni plugins intact.
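Since IP-in-IP now defaults to on, operators who only want encapsulation for traffic that crosses subnets can override the new variable in group_vars, per the comment above:
```
ipip: true
ipip_mode: cross-subnet
```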

@ -94,7 +94,7 @@
shell: > shell: >
echo '{ echo '{
"kind": "ipPool", "kind": "ipPool",
"spec": {"disabled": false, "ipip": {"enabled": {{ ipip }}}, "spec": {"disabled": false, "ipip": {"enabled": {{ ipip }}, "mode": "{{ ipip_mode }}"},
"nat-outgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }}}, "nat-outgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }}},
"apiVersion": "v1", "apiVersion": "v1",
"metadata": {"cidr": "{{ kube_pods_subnet }}"} "metadata": {"cidr": "{{ kube_pods_subnet }}"}

@ -83,6 +83,15 @@
- /etc/dhcp/dhclient.d/zdnsupdate.sh - /etc/dhcp/dhclient.d/zdnsupdate.sh
- /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate - /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate
- "{{ bin_dir }}/kubelet" - "{{ bin_dir }}/kubelet"
- "{{ bin_dir }}/kubernetes-scripts"
- /run/flannel
- /etc/flannel
- /run/kubernetes
- /usr/local/share/ca-certificates/kube-ca.crt
- /usr/local/share/ca-certificates/etcd-ca.crt
- /etc/ssl/certs/kube-ca.pem
- /etc/ssl/certs/etcd-ca.pem
- /var/log/pods/
tags: ['files'] tags: ['files']

@ -3,4 +3,5 @@
- name: Uncordon node - name: Uncordon node
command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}" command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}"
delegate_to: "{{ groups['kube-master'][0] }}" delegate_to: "{{ groups['kube-master'][0] }}"
when: needs_cordoning|default(false) when: (needs_cordoning|default(false)) and ( {%- if inventory_hostname in groups['kube-node'] -%} true {%- else -%} false {%- endif -%} )

@ -7,11 +7,11 @@
- set_fact: - set_fact:
needs_cordoning: >- needs_cordoning: >-
{% if " Ready" in kubectl_nodes.stdout %} {% if " Ready" in kubectl_nodes.stdout -%}
true true
{% else %} {%- else -%}
false false
{% endif %} {%- endif %}
- name: Cordon node - name: Cordon node
command: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }}" command: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }}"

@ -1,6 +1,6 @@
--- ---
- name: bootstrap/ca_trust | pull CA from cert from groups.vault|first - name: "bootstrap/ca_trust | pull CA from cert from {{groups.vault|first}}"
command: "cat {{ vault_cert_dir }}/ca.pem" command: "cat {{ vault_cert_dir }}/ca.pem"
register: vault_cert_file_cat register: vault_cert_file_cat
delegate_to: "{{ groups['vault']|first }}" delegate_to: "{{ groups['vault']|first }}"

@ -26,7 +26,7 @@
mode: "{{ issue_cert_dir_mode | d('0755') }}" mode: "{{ issue_cert_dir_mode | d('0755') }}"
owner: "{{ issue_cert_file_owner | d('root') }}" owner: "{{ issue_cert_file_owner | d('root') }}"
- name: issue_cert | Generate the cert - name: "issue_cert | Generate the cert for {{ issue_cert_role }}"
uri: uri:
url: "{{ issue_cert_url }}/v1/{{ issue_cert_mount|d('pki') }}/issue/{{ issue_cert_role }}" url: "{{ issue_cert_url }}/v1/{{ issue_cert_mount|d('pki') }}/issue/{{ issue_cert_role }}"
headers: "{{ issue_cert_headers }}" headers: "{{ issue_cert_headers }}"
@ -40,7 +40,7 @@
register: issue_cert_result register: issue_cert_result
when: inventory_hostname == issue_cert_hosts|first when: inventory_hostname == issue_cert_hosts|first
- name: issue_cert | Copy the cert to all hosts - name: "issue_cert | Copy {{ issue_cert_path }} cert to all hosts"
copy: copy:
content: "{{ hostvars[issue_cert_hosts|first]['issue_cert_result']['json']['data']['certificate'] }}" content: "{{ hostvars[issue_cert_hosts|first]['issue_cert_result']['json']['data']['certificate'] }}"
dest: "{{ issue_cert_path }}" dest: "{{ issue_cert_path }}"
@ -48,7 +48,7 @@
mode: "{{ issue_cert_file_mode | d('0644') }}" mode: "{{ issue_cert_file_mode | d('0644') }}"
owner: "{{ issue_cert_file_owner | d('root') }}" owner: "{{ issue_cert_file_owner | d('root') }}"
- name: issue_cert | Copy the key to all hosts - name: "issue_cert | Copy key for {{ issue_cert_path }} to all hosts"
copy: copy:
content: "{{ hostvars[issue_cert_hosts|first]['issue_cert_result']['json']['data']['private_key'] }}" content: "{{ hostvars[issue_cert_hosts|first]['issue_cert_result']['json']['data']['private_key'] }}"
dest: "{{ issue_cert_path.rsplit('.', 1)|first }}-key.{{ issue_cert_path.rsplit('.', 1)|last }}" dest: "{{ issue_cert_path.rsplit('.', 1)|first }}-key.{{ issue_cert_path.rsplit('.', 1)|last }}"

@ -28,7 +28,7 @@
state: directory state: directory
when: inventory_hostname not in sync_file_srcs when: inventory_hostname not in sync_file_srcs
- name: "sync_file | Copy the file to hosts that don't have it" - name: "sync_file | Copy {{ sync_file_path }} to hosts that don't have it"
copy: copy:
content: "{{ sync_file_contents }}" content: "{{ sync_file_contents }}"
dest: "{{ sync_file_path }}" dest: "{{ sync_file_path }}"
@ -37,7 +37,7 @@
owner: "{{ sync_file_owner|d('root') }}" owner: "{{ sync_file_owner|d('root') }}"
when: inventory_hostname not in sync_file_srcs when: inventory_hostname not in sync_file_srcs
- name: "sync_file | Copy the key file to hosts that don't have it" - name: "sync_file | Copy {{ sync_file_key_path }} to hosts that don't have it"
copy: copy:
content: "{{ sync_file_key_contents }}" content: "{{ sync_file_key_contents }}"
dest: "{{ sync_file_key_path }}" dest: "{{ sync_file_key_path }}"

View file

@@ -19,12 +19,12 @@
when: >- when: >-
sync_file_is_cert|d() and (sync_file_key_path is not defined or sync_file_key_path == '') sync_file_is_cert|d() and (sync_file_key_path is not defined or sync_file_key_path == '')
- name: "sync_file | Check if file exists" - name: "sync_file | Check if {{sync_file_path}} file exists"
stat: stat:
path: "{{ sync_file_path }}" path: "{{ sync_file_path }}"
register: sync_file_stat register: sync_file_stat
- name: "sync_file | Check if key file exists" - name: "sync_file | Check if {{ sync_file_key_path }} key file exists"
stat: stat:
path: "{{ sync_file_key_path }}" path: "{{ sync_file_key_path }}"
register: sync_file_key_stat register: sync_file_key_stat
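Both sync_file hunks follow the same shape: stat the target path on every host, treat the hosts that already have the file as sources, and push the registered contents to the rest. A minimal sketch of the stat-then-copy step, with the source selection simplified (the role itself keys off a sync_file_srcs list rather than the bare stat result):

- name: "sync_file | Check if {{ sync_file_path }} file exists"
  stat:
    path: "{{ sync_file_path }}"
  register: sync_file_stat

- name: "sync_file | Copy {{ sync_file_path }} to hosts that don't have it"
  copy:
    content: "{{ sync_file_contents }}"          # assumed to be read from a source host earlier
    dest: "{{ sync_file_path }}"
    mode: "{{ sync_file_mode | d('0644') }}"
    owner: "{{ sync_file_owner | d('root') }}"
  when: not sync_file_stat.stat.exists           # simplified; the role uses sync_file_srcs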

View file

@@ -7,7 +7,7 @@
vars: vars:
ansible_ssh_pipelining: false ansible_ssh_pipelining: false
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: bootstrap-os, tags: bootstrap-os} - { role: bootstrap-os, tags: bootstrap-os}
##We still have to gather facts about our masters and etcd nodes ##We still have to gather facts about our masters and etcd nodes
@@ -21,7 +21,7 @@
- hosts: kube-node - hosts: kube-node
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade } - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
- { role: kubernetes/preinstall, tags: preinstall } - { role: kubernetes/preinstall, tags: preinstall }
- { role: docker, tags: docker } - { role: docker, tags: docker }
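This and the following playbook hunks are mechanical: every play that used to load kargo-defaults as its first role now loads kubespray-defaults, in line with the Kargo to Kubespray rename, and the rest of each play is untouched. The resulting shape of a typical play (roles and tags as in the diff):

- hosts: kube-node
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }               # defaults load before the functional roles
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: docker, tags: docker }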

View file

@@ -30,7 +30,7 @@
credentials_file: "{{gce_credentials_file | default(omit)}}" credentials_file: "{{gce_credentials_file | default(omit)}}"
project_id: "{{ gce_project_id }}" project_id: "{{ gce_project_id }}"
zone: "{{cloud_region}}" zone: "{{cloud_region}}"
metadata: '{"test_id": "{{test_id}}", "network": "{{kube_network_plugin}}"}' metadata: '{"test_id": "{{test_id}}", "network": "{{kube_network_plugin}}", "startup-script": "{{startup_script}}"}'
tags: "build-{{test_name}},{{kube_network_plugin}}" tags: "build-{{test_name}},{{kube_network_plugin}}"
register: gce register: gce
@@ -52,5 +52,5 @@
when: mode in ['scale', 'separate-scale', 'ha-scale'] when: mode in ['scale', 'separate-scale', 'ha-scale']
- name: Wait for SSH to come up - name: Wait for SSH to come up
wait_for: host={{item.public_ip}} port=22 delay=10 timeout=180 state=started wait_for: host={{item.public_ip}} port=22 delay=30 timeout=180 state=started
with_items: "{{gce.instance_data}}" with_items: "{{gce.instance_data}}"
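Two test-infrastructure tweaks above: the GCE instance metadata now carries a startup-script key fed from a startup_script variable, and the SSH wait allows 30 seconds before the first probe instead of 10. As a rough illustration, with startup_script set to a one-line script the templated metadata string would expand to something like the following (all values are purely illustrative):

# illustrative rendering only, given test_id=123, kube_network_plugin=calico
# and startup_script='sysctl -w net.ipv4.ip_forward=1'
metadata: '{"test_id": "123", "network": "calico", "startup-script": "sysctl -w net.ipv4.ip_forward=1"}'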

View file

@@ -2,7 +2,7 @@
- hosts: localhost - hosts: localhost
gather_facts: False gather_facts: False
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]} - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- hosts: k8s-cluster:etcd:calico-rr - hosts: k8s-cluster:etcd:calico-rr
@@ -13,7 +13,7 @@
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled. # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
ansible_ssh_pipelining: false ansible_ssh_pipelining: false
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: bootstrap-os, tags: bootstrap-os} - { role: bootstrap-os, tags: bootstrap-os}
- hosts: k8s-cluster:etcd:calico-rr - hosts: k8s-cluster:etcd:calico-rr
@@ -25,7 +25,7 @@
- hosts: k8s-cluster:etcd:calico-rr - hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade } - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
- { role: kubernetes/preinstall, tags: preinstall } - { role: kubernetes/preinstall, tags: preinstall }
- { role: docker, tags: docker } - { role: docker, tags: docker }
@@ -36,25 +36,25 @@
- hosts: etcd:k8s-cluster:vault - hosts: etcd:k8s-cluster:vault
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults, when: "cert_management == 'vault'" } - { role: kubespray-defaults, when: "cert_management == 'vault'" }
- { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" } - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
- hosts: etcd - hosts: etcd
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: etcd, tags: etcd, etcd_cluster_setup: true } - { role: etcd, tags: etcd, etcd_cluster_setup: true }
- hosts: k8s-cluster - hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: etcd, tags: etcd, etcd_cluster_setup: false } - { role: etcd, tags: etcd, etcd_cluster_setup: false }
- hosts: etcd:k8s-cluster:vault - hosts: etcd:k8s-cluster:vault
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults, when: "cert_management == 'vault'"} - { role: kubespray-defaults, when: "cert_management == 'vault'"}
- { role: vault, tags: vault, when: "cert_management == 'vault'"} - { role: vault, tags: vault, when: "cert_management == 'vault'"}
#Handle upgrades to master components first to maintain backwards compat. #Handle upgrades to master components first to maintain backwards compat.
@@ -62,46 +62,47 @@
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: 1 serial: 1
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: upgrade/pre-upgrade, tags: pre-upgrade } - { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: kubernetes/node, tags: node } - { role: kubernetes/node, tags: node }
- { role: kubernetes/master, tags: master } - { role: kubernetes/master, tags: master }
- { role: network_plugin, tags: network } - { role: network_plugin, tags: network }
- { role: upgrade/post-upgrade, tags: post-upgrade }
#Finally handle worker upgrades, based on given batch size #Finally handle worker upgrades, based on given batch size
- hosts: kube-node:!kube-master - hosts: kube-node:!kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: "{{ serial | default('20%') }}" serial: "{{ serial | default('20%') }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: upgrade/pre-upgrade, tags: pre-upgrade } - { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: kubernetes/node, tags: node } - { role: kubernetes/node, tags: node }
- { role: network_plugin, tags: network } - { role: network_plugin, tags: network }
- { role: upgrade/post-upgrade, tags: post-upgrade } - { role: upgrade/post-upgrade, tags: post-upgrade }
- { role: kargo-defaults} - { role: kubespray-defaults}
- hosts: kube-master - hosts: kube-master
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: kubernetes-apps/network_plugin, tags: network } - { role: kubernetes-apps/network_plugin, tags: network }
- { role: kubernetes-apps/policy_controller, tags: policy-controller } - { role: kubernetes-apps/policy_controller, tags: policy-controller }
- hosts: calico-rr - hosts: calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: network_plugin/calico/rr, tags: network } - { role: network_plugin/calico/rr, tags: network }
- hosts: k8s-cluster - hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq } - { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf } - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
- hosts: kube-master[0] - hosts: kube-master[0]
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles: roles:
- { role: kargo-defaults} - { role: kubespray-defaults}
- { role: kubernetes-apps, tags: apps } - { role: kubernetes-apps, tags: apps }
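Taken together, the upgrade flow keeps its two-phase shape: masters are upgraded one at a time (serial: 1) through the pre-upgrade, node, master and network roles plus the newly added post-upgrade role, which per the first hunk of this diff uncordons the node again, and workers then follow in batches sized by the serial variable, 20% of kube-node by default. A minimal sketch of a stricter batch size, assuming it is supplied as an extra-vars file (the value is illustrative):

# illustrative extra-vars: upgrade one worker per batch instead of the default 20%
# (with ten workers, the default would mean batches of two nodes)
serial: 1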