Merge pull request #2233 from hswong3i/multiple_inventory_dir

Support multiple inventory files under individual inventory directory
Antoine Legrand 2018-02-08 11:57:04 +01:00 committed by GitHub
commit 57e7a5a34a
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
21 changed files with 162 additions and 134 deletions
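In short, this change replaces the single `inventory/inventory.example` (and the hardcoded `inventory/inventory.ini` paths in CI and docs) with self-contained inventory directories such as `inventory/sample`. A minimal sketch of the per-cluster workflow this enables, using the paths introduced below (the `mycluster` name is just an example):

```
# Copy the shipped sample inventory into a directory for this cluster
cp -rfp inventory/sample inventory/mycluster

# Adjust hosts and group variables for this cluster only
vim inventory/mycluster/hosts.ini
vim inventory/mycluster/group_vars/k8s-cluster.yml

# Deploy; lookups on {{ inventory_dir }} now resolve to inventory/mycluster
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml
```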


@@ -87,7 +87,7 @@ before_script:
     -e gce_credentials_file=${HOME}/.ssh/gce.json
     -e gce_project_id=${GCE_PROJECT_ID}
     -e gce_service_account_email=${GCE_ACCOUNT}
-    -e inventory_path=${PWD}/inventory/inventory.ini
+    -e inventory_path=${PWD}/inventory/sample/hosts.ini
     -e test_id=${TEST_ID}
     -e preemptible=$GCE_PREEMPTIBLE
@@ -104,7 +104,7 @@ before_script:
   # Create cluster
   - >
     ansible-playbook
-    -i inventory/inventory.ini
+    -i inventory/sample/hosts.ini
     -b --become-user=root
     --private-key=${HOME}/.ssh/id_rsa
     -u $SSH_USER
@ -124,7 +124,7 @@ before_script:
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml"; test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
git checkout "${CI_BUILD_REF}"; git checkout "${CI_BUILD_REF}";
ansible-playbook ansible-playbook
-i inventory/inventory.ini -i inventory/sample/hosts.ini
-b --become-user=root -b --become-user=root
--private-key=${HOME}/.ssh/id_rsa --private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER -u $SSH_USER
@@ -141,20 +141,20 @@ before_script:
   # Tests Cases
   ## Test Master API
   - >
-    ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
+    ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
     -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
   ## Ping the between 2 pod
-  - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
+  - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
   ## Advanced DNS checks
-  - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
+  - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
   ## Idempotency checks 1/5 (repeat deployment)
   - >
     if [ "${IDEMPOT_CHECK}" = "true" ]; then
     ansible-playbook
-    -i inventory/inventory.ini
+    -i inventory/sample/hosts.ini
     -b --become-user=root
     --private-key=${HOME}/.ssh/id_rsa
     -u $SSH_USER
@@ -171,7 +171,7 @@ before_script:
   - >
     if [ "${IDEMPOT_CHECK}" = "true" ]; then
     ansible-playbook
-    -i inventory/inventory.ini
+    -i inventory/sample/hosts.ini
     -b --become-user=root
     --private-key=${HOME}/.ssh/id_rsa
     -u $SSH_USER
@ -186,7 +186,7 @@ before_script:
- > - >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
ansible-playbook ansible-playbook
-i inventory/inventory.ini -i inventory/sample/hosts.ini
-b --become-user=root -b --become-user=root
--private-key=${HOME}/.ssh/id_rsa --private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER -u $SSH_USER
@@ -203,7 +203,7 @@ before_script:
   - >
     if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
     ansible-playbook
-    -i inventory/inventory.ini
+    -i inventory/sample/hosts.ini
    -b --become-user=root
     --private-key=${HOME}/.ssh/id_rsa
     -u $SSH_USER
@@ -219,7 +219,7 @@ before_script:
   ## Idempotency checks 5/5 (Advanced DNS checks)
   - >
     if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
-    ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
+    ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH}
     -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
     --limit "all:!fake_hosts"
     tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
@@ -227,13 +227,13 @@ before_script:
 after_script:
   - >
-    ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
+    ansible-playbook -i inventory/sample/hosts.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
     -e @${CI_TEST_VARS}
     -e test_id=${TEST_ID}
     -e gce_project_id=${GCE_PROJECT_ID}
     -e gce_service_account_email=${GCE_ACCOUNT}
     -e gce_credentials_file=${HOME}/.ssh/gce.json
-    -e inventory_path=${PWD}/inventory/inventory.ini
+    -e inventory_path=${PWD}/inventory/sample/hosts.ini
 # Test matrix. Leave the comments for markup scripts.
 .coreos_calico_aio_variables: &coreos_calico_aio_variables

README.md

@@ -1,67 +1,89 @@
 ![Kubernetes Logo](https://s28.postimg.org/lf3q4ocpp/k8s.png)
-## Deploy a production ready kubernetes cluster
+Deploy a Production Ready Kubernetes Cluster
+============================================
-If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **#kubespray**.
+If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
 - Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
 - **High available** cluster
 - **Composable** (Choice of the network plugin for instance)
 - Support most popular **Linux distributions**
 - **Continuous integration tests**
+Quick Start
+-----------
 To deploy the cluster you can use :
-**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py) <br>
-**vagrant** by simply running `vagrant up` (for tests purposes) <br>
+### Ansible
+    # Copy ``inventory/sample`` as ``inventory/mycluster``
+    cp -rfp inventory/sample inventory/mycluster
+    # Update Ansible inventory file with inventory builder
+    declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
+    CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
+    # Review and change parameters under ``inventory/mycluster/group_vars``
+    cat inventory/mycluster/group_vars/all.yml
+    cat inventory/mycluster/group_vars/k8s-cluster.yml
+    # Deploy Kubespray with Ansible Playbook
+    ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml
+### Vagrant
+    # Simply running `vagrant up` (for tests purposes)
+    vagrant up
-* [Requirements](#requirements)
-* [Kubespray vs ...](docs/comparisons.md)
-* [Getting started](docs/getting-started.md)
-* [Ansible inventory and tags](docs/ansible.md)
-* [Integration with existing ansible repo](docs/integration.md)
-* [Deployment data variables](docs/vars.md)
-* [DNS stack](docs/dns-stack.md)
-* [HA mode](docs/ha-mode.md)
-* [Network plugins](#network-plugins)
-* [Vagrant install](docs/vagrant.md)
-* [CoreOS bootstrap](docs/coreos.md)
-* [Debian Jessie setup](docs/debian.md)
-* [Downloaded artifacts](docs/downloads.md)
-* [Cloud providers](docs/cloud.md)
-* [OpenStack](docs/openstack.md)
-* [AWS](docs/aws.md)
-* [Azure](docs/azure.md)
-* [vSphere](docs/vsphere.md)
-* [Large deployments](docs/large-deployments.md)
-* [Upgrades basics](docs/upgrades.md)
-* [Roadmap](docs/roadmap.md)
+Documents
+---------
+- [Requirements](#requirements)
+- [Kubespray vs ...](docs/comparisons.md)
+- [Getting started](docs/getting-started.md)
+- [Ansible inventory and tags](docs/ansible.md)
+- [Integration with existing ansible repo](docs/integration.md)
+- [Deployment data variables](docs/vars.md)
+- [DNS stack](docs/dns-stack.md)
+- [HA mode](docs/ha-mode.md)
+- [Network plugins](#network-plugins)
+- [Vagrant install](docs/vagrant.md)
+- [CoreOS bootstrap](docs/coreos.md)
+- [Debian Jessie setup](docs/debian.md)
+- [Downloaded artifacts](docs/downloads.md)
+- [Cloud providers](docs/cloud.md)
+- [OpenStack](docs/openstack.md)
+- [AWS](docs/aws.md)
+- [Azure](docs/azure.md)
+- [vSphere](docs/vsphere.md)
+- [Large deployments](docs/large-deployments.md)
+- [Upgrades basics](docs/upgrades.md)
+- [Roadmap](docs/roadmap.md)
-Supported Linux distributions
-===============
-* **Container Linux by CoreOS**
-* **Debian** Jessie
-* **Ubuntu** 16.04
-* **CentOS/RHEL** 7
+Supported Linux Distributions
+-----------------------------
+- **Container Linux by CoreOS**
+- **Debian** Jessie
+- **Ubuntu** 16.04
+- **CentOS/RHEL** 7
 Note: Upstart/SysV init based OS types are not supported.
 Versions of supported components
 --------------------------------
-[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.2 <br>
-[etcd](https://github.com/coreos/etcd/releases) v3.2.4 <br>
-[flanneld](https://github.com/coreos/flannel/releases) v0.8.0 <br>
-[calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 <br>
-[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
-[contiv](https://github.com/contiv/install/releases) v1.0.3 <br>
-[weave](http://weave.works/) v2.0.1 <br>
-[docker](https://www.docker.com/) v1.13 (see note)<br>
-[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)<br>
+- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.2
+- [etcd](https://github.com/coreos/etcd/releases) v3.2.4
+- [flanneld](https://github.com/coreos/flannel/releases) v0.8.0
+- [calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0
+- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
+- [contiv](https://github.com/contiv/install/releases) v1.0.3
+- [weave](http://weave.works/) v2.0.1
+- [docker](https://www.docker.com/) v1.13 (see note)
+- [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
 Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
@@ -71,54 +93,59 @@ plugins' related OS services. Also note, only one of the supported network
 plugins can be deployed for a given single cluster.
 Requirements
---------------
+------------
-* **Ansible v2.4 (or newer) and python-netaddr is installed on the machine
-  that will run Ansible commands**
-* **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
-* The target servers must have **access to the Internet** in order to pull docker images.
-* The target servers are configured to allow **IPv4 forwarding**.
-* **Your ssh key must be copied** to all the servers part of your inventory.
-* The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
+- **Ansible v2.4 (or newer) and python-netaddr is installed on the machine
+  that will run Ansible commands**
+- **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
+- The target servers must have **access to the Internet** in order to pull docker images.
+- The target servers are configured to allow **IPv4 forwarding**.
+- **Your ssh key must be copied** to all the servers part of your inventory.
+- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
   in order to avoid any issue during deployment you should disable your firewall.
-## Network plugins
+Network Plugins
+---------------
 You can choose between 4 network plugins. (default: `calico`, except Vagrant uses `flannel`)
-* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
-* [**calico**](docs/calico.md): bgp (layer 3) networking.
-* [**canal**](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
-* [**contiv**](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
+- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
+- [calico](docs/calico.md): bgp (layer 3) networking.
+- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
+- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
   apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
-* [**weave**](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
+- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
   (Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
 The choice is defined with the variable `kube_network_plugin`. There is also an
 option to leverage built-in cloud provider networking instead.
 See also [Network checker](docs/netcheck.md).
-## Community docs and resources
+Community docs and resources
+----------------------------
 - [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/)
 - [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
 - [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
 - [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
-## Tools and projects on top of Kubespray
+Tools and projects on top of Kubespray
+--------------------------------------
 - [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
 - [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
 - [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
-## CI Tests
+CI Tests
+--------
 ![Gitlab Logo](https://s27.postimg.org/wmtaig1wz/gitlabci.png)
-[![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines) </br>
+[![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines)
 CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack).
 See the [test matrix](docs/test_cases.md) for details.


@@ -59,6 +59,6 @@ It will create the file ./inventory which can then be used with kubespray, e.g.:
 ```shell
 $ cd kubespray-root-dir
-$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
+$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all.yml" cluster.yml
 ```


@@ -6,16 +6,16 @@ You can either deploy using Ansible on its own by supplying your own inventory f
 In the same directory of this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, additionally to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and we add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
-Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then execute change to the kubespray root folder, and execute (supposing that the machines are all using ubuntu):
+Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings on `inventory/sample/group_vars/all.yml` make sense with your deployment. Then execute change to the kubespray root folder, and execute (supposing that the machines are all using ubuntu):
 ```
-ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
+ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
 ```
 This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:
 ```
-ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
+ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
 ```
 If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:
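The trailing colon above leads into the doc's own per-host example, which the diff cuts off. A minimal sketch of what such overrides look like, with hypothetical host names and addresses:

```
# Append per-host ansible_ssh_user overrides to the inventory you created
cat >> inventory/sample/k8s_gfs_inventory <<'EOF'
k8s-master-1 ansible_ssh_host=192.168.0.147 ansible_ssh_user=ubuntu
gfs-node-1   ansible_ssh_host=192.168.0.151 ansible_ssh_user=centos
EOF
```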


@@ -47,10 +47,10 @@ export SKIP_PIP_INSTALL=1
 %files
 %doc %{_docdir}/%{name}/README.md
-%doc %{_docdir}/%{name}/inventory/inventory.example
+%doc %{_docdir}/%{name}/inventory/sample/hosts.ini
 %config %{_sysconfdir}/%{name}/ansible.cfg
-%config %{_sysconfdir}/%{name}/inventory/group_vars/all.yml
-%config %{_sysconfdir}/%{name}/inventory/group_vars/k8s-cluster.yml
+%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
+%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
 %license %{_docdir}/%{name}/LICENSE
 %{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
 %{_datarootdir}/%{name}/roles/


@@ -200,7 +200,7 @@ if it fails try to connect manually via SSH ... it could be something as simple
 ## Configure Cluster variables
-Edit`inventory/group_vars/all.yml`:
+Edit `inventory/sample/group_vars/all.yml`:
 - Set variable **bootstrap_os** according selected image
 ```
 # Valid bootstrap options (required): ubuntu, coreos, centos, none
@@ -218,7 +218,7 @@ bin_dir: /opt/bin
 ```
 cloud_provider: openstack
 ```
-Edit`inventory/group_vars/k8s-cluster.yml`:
+Edit `inventory/sample/group_vars/k8s-cluster.yml`:
 - Set variable **kube_network_plugin** according selected networking
 ```
 # Choose network plugin (calico, weave or flannel)


@@ -27,7 +27,7 @@ not _kube-node_.
 There are also two special groups:
 * **calico-rr** : explained for [advanced Calico networking cases](calico.md)
 * **bastion** : configure a bastion host if your nodes are not directly reachable
 Below is a complete inventory example:
@@ -66,10 +66,10 @@ kube-master
 Group vars and overriding variables precedence
 ----------------------------------------------
-The group variables to control main deployment options are located in the directory ``inventory/group_vars``.
+The group variables to control main deployment options are located in the directory ``inventory/sample/group_vars``.
-Optional variables are located in the `inventory/group_vars/all.yml`.
+Optional variables are located in the `inventory/sample/group_vars/all.yml`.
 Mandatory variables that are common for at least one role (or a node group) can be found in the
-`inventory/group_vars/k8s-cluster.yml`.
+`inventory/sample/group_vars/k8s-cluster.yml`.
 There are also role vars for docker, rkt, kubernetes preinstall and master roles.
 According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
 those cannot be overriden from the group vars. In order to override, one should use
@@ -153,16 +153,16 @@ Example command to filter and apply only DNS configuration tasks and skip
 everything else related to host OS configuration and downloading images of containers:
 ```
-ansible-playbook -i inventory/inventory.ini cluster.yml --tags preinstall,dnsmasq,facts --skip-tags=download,bootstrap-os
+ansible-playbook -i inventory/sample/hosts.ini cluster.yml --tags preinstall,dnsmasq,facts --skip-tags=download,bootstrap-os
 ```
 And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:
 ```
-ansible-playbook -i inventory/inventory.ini -e dnsmasq_dns_server='' cluster.yml --tags resolvconf
+ansible-playbook -i inventory/sample/hosts.ini -e dnsmasq_dns_server='' cluster.yml --tags resolvconf
 ```
 And this prepares all container images localy (at the ansible runner node) without installing
 or upgrading related stuff or trying to upload container to K8s cluster nodes:
 ```
-ansible-playbook -i inventory/inventory.ini cluster.yml \
+ansible-playbook -i inventory/sample/hosts.ini cluster.yml \
   -e download_run_once=true -e download_localhost=true \
   --tags download --skip-tags upload,upgrade
 ```


@@ -6,7 +6,7 @@ Building your own inventory
 Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
 an example inventory located
-[here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/inventory.example).
+[here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/sample/hosts.ini).
 You can use an
 [inventory generator](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py)
@@ -19,9 +19,9 @@ certain threshold. Run `python3 contrib/inventory_builder/inventory.py help`
 Example inventory generator usage:
 ```
-cp -r inventory my_inventory
+cp -r inventory/sample inventory/mycluster
 declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
-CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS[@]}
+CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
 ```
 Starting custom deployment
@@ -33,7 +33,7 @@ and start the deployment:
 **IMPORTANT: Edit my_inventory/groups_vars/*.yaml to override data vars**
 ```
-ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v \
+ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
   --private-key=~/.ssh/private_key
 ```
@@ -47,7 +47,7 @@ You may want to add **worker** nodes to your existing cluster. This can be done
 - Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
 - Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
 ```
-ansible-playbook -i my_inventory/inventory.cfg scale.yml -b -v \
+ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
   --private-key=~/.ssh/private_key
 ```


@@ -24,13 +24,13 @@ If you wanted to upgrade just kube_version from v1.4.3 to v1.4.6, you could
 deploy the following way:
 ```
-ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.3
+ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.3
 ```
 And then repeat with v1.4.6 as kube_version:
 ```
-ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.6
+ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.6
 ```
 #### Graceful upgrade
@@ -44,7 +44,7 @@ deployed.
 ```
 git fetch origin
 git checkout origin/master
-ansible-playbook upgrade-cluster.yml -b -i inventory/inventory.cfg -e kube_version=v1.6.0
+ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.6.0
 ```
 After a successul upgrade, the Server Version should be updated:


@@ -16,7 +16,7 @@ After this step you should have:
 ## Kubespray configuration
-Fist you must define the cloud provider in `inventory/group_vars/all.yml` and set it to `vsphere`.
+Fist you must define the cloud provider in `inventory/sample/group_vars/all.yml` and set it to `vsphere`.
 ```yml
 cloud_provider: vsphere
 ```
@@ -58,7 +58,7 @@ vsphere_resource_pool: "K8s-Pool"
 Once the configuration is set, you can execute the playbook again to apply the new configuration
 ```
 cd kubespray
-ansible-playbook -i inventory/inventory.cfg -b -v cluster.yml
+ansible-playbook -i inventory/sample/hosts.ini -b -v cluster.yml
 ```
 You'll find some usefull examples [here](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/vsphere) to test your configuration.


@@ -12,7 +12,7 @@ Weave encryption is supported for all communication
 * To use Weave encryption, specify a strong password (if no password, no encrytion)
 ```
-# In file ./inventory/group_vars/k8s-cluster.yml
+# In file ./inventory/sample/group_vars/k8s-cluster.yml
 weave_password: EnterPasswordHere
 ```
@@ -77,14 +77,14 @@ The seed mode also allows multi-clouds and hybrid on-premise/cloud clusters depl
 * Switch from consensus mode to seed mode
 ```
-# In file ./inventory/group_vars/k8s-cluster.yml
+# In file ./inventory/sample/group_vars/k8s-cluster.yml
 weave_mode_seed: true
 ```
 These two variables are only used when `weave_mode_seed` is set to `true` (**/!\ do not manually change these values**)
 ```
-# In file ./inventory/group_vars/k8s-cluster.yml
+# In file ./inventory/sample/group_vars/k8s-cluster.yml
 weave_seed: uninitialized
 weave_peers: uninitialized
 ```

inventory/local/group_vars (symbolic link)

@@ -0,0 +1 @@
+../sample/group_vars
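This new one-line file is a symlink, so the `local` inventory reuses the sample group variables instead of duplicating them. The equivalent shell, for illustration:

```
# Recreate the symlink by hand (what this one-line file represents)
ln -s ../sample/group_vars inventory/local/group_vars

# The local inventory now resolves group_vars through inventory/sample
readlink inventory/local/group_vars   # -> ../sample/group_vars
```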


@@ -96,8 +96,8 @@ bin_dir: /usr/local/bin
 ## Uncomment to enable experimental kubeadm deployment mode
 #kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
+#kubeadm_token_first: "{{ lookup('password', inventory_dir + '/credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
+#kubeadm_token_second: "{{ lookup('password', inventory_dir + '/credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
 #kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
 #
 ## Set these proxy values in order to update package manager and docker daemon to use proxies


@@ -37,7 +37,7 @@ kube_log_level: 2
 # Users to create for basic auth in Kubernetes API via HTTP
 # Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}"
 kube_users:
   kube:
     pass: "{{kube_api_pwd}}"
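Ansible's `password` lookup generates a random secret on first use and persists it at the given path, so prefixing the path with `inventory_dir` is what makes these credentials per-inventory rather than shared through a single `credentials/` directory. A sketch of the resulting layout (the cluster directory names are assumed):

```
# After deploying from two inventory directories, each keeps its own secrets
ls inventory/mycluster/credentials/
# kube_user  kubeadm_token_first  kubeadm_token_second
ls inventory/othercluster/credentials/
# kube_user  kubeadm_token_first  kubeadm_token_second
```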


@@ -33,7 +33,7 @@
 - name: Weave seed | Save seed
   lineinfile:
-    dest: "./inventory/group_vars/k8s-cluster.yml"
+    dest: "{{ inventory_dir }}/group_vars/k8s-cluster.yml"
     state: present
     regexp: '^weave_seed:'
     line: 'weave_seed: {{ seed }}'
@@ -45,7 +45,7 @@
 - name: Weave seed | Save peers
   lineinfile:
-    dest: "./inventory/group_vars/k8s-cluster.yml"
+    dest: "{{ inventory_dir }}/group_vars/k8s-cluster.yml"
     state: present
     regexp: '^weave_peers:'
     line: 'weave_peers: {{ peers }}'
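With `{{ inventory_dir }}` in place of the hardcoded `./inventory` path, these tasks write the seed back into whichever inventory the play was started with. One way to check what the variable resolves to (a sketch; assumes the `mycluster` inventory from above):

```
ansible all -i inventory/mycluster/hosts.ini -m debug -a "var=inventory_dir"
# => "inventory_dir": "/path/to/kubespray/inventory/mycluster"
```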


@@ -115,7 +115,7 @@ vault_pki_mounts:
     roles:
       - name: vault
         group: vault
-        password: "{{ lookup('password', 'credentials/vault/vault length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/vault length=15') }}"
         policy_rules: default
         role_options: default
   etcd:
@@ -127,7 +127,7 @@ vault_pki_mounts:
     roles:
       - name: etcd
         group: etcd
-        password: "{{ lookup('password', 'credentials/vault/etcd length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/etcd length=15') }}"
         policy_rules: default
         role_options:
           allow_any_name: true
@@ -142,7 +142,7 @@ vault_pki_mounts:
     roles:
       - name: kube-master
         group: kube-master
-        password: "{{ lookup('password', 'credentials/vault/kube-master length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-master length=15') }}"
         policy_rules: default
         role_options:
           allow_any_name: true
@@ -150,7 +150,7 @@ vault_pki_mounts:
           organization: "system:masters"
       - name: kube-node
         group: k8s-cluster
-        password: "{{ lookup('password', 'credentials/vault/kube-node length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-node length=15') }}"
         policy_rules: default
         role_options:
           allow_any_name: true
@@ -158,7 +158,7 @@ vault_pki_mounts:
           organization: "system:nodes"
      - name: kube-proxy
        group: k8s-cluster
-       password: "{{ lookup('password', 'credentials/vault/kube-proxy length=15') }}"
+       password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy length=15') }}"
       policy_rules: default
       role_options:
         allow_any_name: true


@@ -32,12 +32,12 @@ data_files =
     LICENSE
     README.md
   /usr/share/doc/kubespray/inventory/ =
-    inventory/inventory.example
+    inventory/sample/hosts.ini
   /etc/kubespray/ =
     ansible.cfg
-  /etc/kubespray/inventory/group_vars/ =
-    inventory/group_vars/all.yml
-    inventory/group_vars/k8s-cluster.yml
+  /etc/kubespray/inventory/sample/group_vars/ =
+    inventory/sample/group_vars/all.yml
+    inventory/sample/group_vars/k8s-cluster.yml
 [wheel]
 universal = 1


@@ -1,5 +1,5 @@
 def run(username, credentialsId, ami, network_plugin, aws_access, aws_secret) {
-  def inventory_path = pwd() + "/inventory/inventory-test.ini"
+  def inventory_path = pwd() + "/inventory/sample/hosts.ini"
   dir('tests') {
     wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) {
       try {


@@ -6,7 +6,7 @@
   uri:
     url: "https://{{ access_ip | default(ansible_default_ipv4.address) }}:{{ kube_apiserver_port }}/api/v1"
     user: kube
-    password: "{{ lookup('password', '../../credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+    password: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}"
     validate_certs: no
     status_code: 200,401
   when: not kubeadm_enabled|default(false)