Merge branch 'master' of github.com:kubernetes-incubator/kubespray

commit af5943f7e6
298 changed files with 3642 additions and 1651 deletions
.gitignore (vendored, 3 changes)

@@ -1,6 +1,7 @@
 .vagrant
 *.retry
 inventory/vagrant_ansible_inventory
+inventory/credentials/
 inventory/group_vars/fake_hosts.yml
 inventory/host_vars/
 temp
@@ -23,7 +24,7 @@ __pycache__/

 # Distribution / packaging
 .Python
-artifacts/
+inventory/*/artifacts/
 env/
 build/
 credentials/
.gitlab-ci.yml

@@ -109,7 +109,6 @@ before_script:
 ${SSH_ARGS}
 ${LOG_LEVEL}
 -e @${CI_TEST_VARS}
--e ansible_python_interpreter=${PYPATH}
 -e ansible_ssh_user=${SSH_USER}
 -e local_release_dir=${PWD}/downloads
 --limit "all:!fake_hosts"

@@ -129,7 +128,6 @@ before_script:
 ${SSH_ARGS}
 ${LOG_LEVEL}
 -e @${CI_TEST_VARS}
--e ansible_python_interpreter=${PYPATH}
 -e ansible_ssh_user=${SSH_USER}
 -e local_release_dir=${PWD}/downloads
 --limit "all:!fake_hosts"
@@ -257,6 +255,10 @@ before_script:
 # stage: deploy-special
 MOVED_TO_GROUP_VARS: "true"

+.coreos_cilium_variables: &coreos_cilium_variables
+# stage: deploy-special
+MOVED_TO_GROUP_VARS: "true"
+
 .ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables
 # stage: deploy-special
 MOVED_TO_GROUP_VARS: "true"
@@ -320,16 +322,6 @@ gce_coreos-calico-aio:
 only: [/^pr-.*$/]

 ### PR JOBS PART2
-do_ubuntu-canal-ha:
-stage: deploy-part2
-<<: *job
-<<: *do
-variables:
-<<: *do_variables
-when: on_success
-except: ['triggers']
-only: [/^pr-.*$/]
-
 gce_centos7-flannel-addons:
 stage: deploy-part2
 <<: *job
@@ -363,7 +355,6 @@ gce_coreos-calico-sep-triggers:
 when: on_success
 only: ['triggers']

-
 gce_ubuntu-canal-ha-triggers:
 stage: deploy-part2
 <<: *job
@@ -396,6 +387,16 @@ gce_ubuntu-weave-sep-triggers:
 only: ['triggers']

 # More builds for PRs/merges (manual) and triggers (auto)
+do_ubuntu-canal-ha:
+stage: deploy-part2
+<<: *job
+<<: *do
+variables:
+<<: *do_variables
+when: manual
+except: ['triggers']
+only: ['master', /^pr-.*$/]
+
 gce_ubuntu-canal-ha:
 stage: deploy-part2
 <<: *job
@@ -460,6 +461,17 @@ gce_ubuntu-contiv-sep:
 except: ['triggers']
 only: ['master', /^pr-.*$/]

+gce_coreos-cilium:
+stage: deploy-special
+<<: *job
+<<: *gce
+variables:
+<<: *gce_variables
+<<: *coreos_cilium_variables
+when: manual
+except: ['triggers']
+only: ['master', /^pr-.*$/]
+
 gce_ubuntu-cilium-sep:
 stage: deploy-special
 <<: *job
README.md (21 changes)

@@ -5,7 +5,7 @@ Deploy a Production Ready Kubernetes Cluster

 If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.

-- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
+- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal**
 - **High available** cluster
 - **Composable** (Choice of the network plugin for instance)
 - Support most popular **Linux distributions**
@@ -66,24 +66,25 @@ Supported Linux Distributions
 -----------------------------

 - **Container Linux by CoreOS**
-- **Debian** Jessie
+- **Debian** Jessie, Stretch, Wheezy
 - **Ubuntu** 16.04
 - **CentOS/RHEL** 7
+- **Fedora/CentOS** Atomic

 Note: Upstart/SysV init based OS types are not supported.

 Versions of supported components
 --------------------------------

-- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.2
+- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.5
 - [etcd](https://github.com/coreos/etcd/releases) v3.2.4
-- [flanneld](https://github.com/coreos/flannel/releases) v0.8.0
-- [calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0
+- [flanneld](https://github.com/coreos/flannel/releases) v0.10.0
+- [calico](https://docs.projectcalico.org/v2.6/releases/) v2.6.8
 - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-- [cilium](https://github.com/cilium/cilium) v1.0.0-rc4
-- [contiv](https://github.com/contiv/install/releases) v1.0.3
-- [weave](http://weave.works/) v2.0.1
-- [docker](https://www.docker.com/) v1.13 (see note)
+- [cilium](https://github.com/cilium/cilium) v1.0.0-rc8
+- [contiv](https://github.com/contiv/install/releases) v1.1.7
+- [weave](http://weave.works/) v2.2.1
+- [docker](https://www.docker.com/) v17.03 (see note)
 - [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)

 Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning.
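As a sketch of the version pinning that note recommends (package names are illustrative; adjust them to your distribution and the Docker package you actually installed):

```ShellSession
# RHEL/CentOS: lock the currently installed docker package version
$ sudo yum install -y yum-plugin-versionlock
$ sudo yum versionlock add docker-ce

# Debian/Ubuntu: hold the package so automatic upgrades skip it
$ sudo apt-mark hold docker-ce
```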
@@ -150,5 +151,5 @@ CI Tests

 [![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines)

-CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack).
+CI/end-to-end tests sponsored by Google (GCE)
 See the [test matrix](docs/test_cases.md) for details.
Vagrantfile (vendored, 10 changes)

@@ -3,7 +3,7 @@

 require 'fileutils'

-Vagrant.require_version ">= 1.9.0"
+Vagrant.require_version ">= 2.0.0"

 CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
@@ -135,12 +135,6 @@ Vagrant.configure("2") do |config|

 config.vm.network :private_network, ip: ip

-# workaround for Vagrant 1.9.1 and centos vm
-# https://github.com/hashicorp/vagrant/issues/8096
-if Vagrant::VERSION == "1.9.1" && $os == "centos"
-config.vm.provision "shell", inline: "service network restart", run: "always"
-end
-
 # Disable swap for each vm
 config.vm.provision "shell", inline: "swapoff -a"
@@ -164,7 +158,7 @@ Vagrant.configure("2") do |config|
 if File.exist?(File.join(File.dirname($inventory), "hosts"))
 ansible.inventory_path = $inventory
 end
-ansible.sudo = true
+ansible.become = true
 ansible.limit = "all"
 ansible.host_key_checking = false
 ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"]
ansible.cfg

@@ -12,3 +12,5 @@ library = ./library
 callback_whitelist = profile_tasks
 roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
 deprecation_warnings=False
+inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
+jinja2_extensions = jinja2.ext.do
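For context, `jinja2.ext.do` enables the Jinja `do` statement, which evaluates an expression without emitting output. A minimal illustrative (hypothetical) template snippet:

```
{% set node_ips = [] %}
{% for host in groups['kube-node'] %}
{% do node_ips.append(hostvars[host]['ip']) %}
{% endfor %}
```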
cluster.yml

@@ -21,6 +21,12 @@
 vars:
 ansible_ssh_pipelining: true
 gather_facts: true
+pre_tasks:
+- name: gather facts from all instances
+setup:
+delegate_to: "{{item}}"
+delegate_facts: True
+with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"

 - hosts: k8s-cluster:etcd:calico-rr
 any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -94,6 +100,8 @@
 - { role: kubespray-defaults}
 - { role: kubernetes-apps/network_plugin, tags: network }
 - { role: kubernetes-apps/policy_controller, tags: policy-controller }
+- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
+- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }

 - hosts: calico-rr
 any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -1,6 +1,6 @@

 {% for vm in vm_ip_list %}
-{% if not use_bastion or vm.virtualMachinename == 'bastion' %}
+{% if not use_bastion or vm.virtualMachine.name == 'bastion' %}
 {{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
 {% else %}
 {{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.privateIpAddresses[0] }}
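With the typo fixed (the old `vm.virtualMachinename` attribute never matched), the template renders inventory lines like these, using hypothetical names and addresses:

```
bastion ansible_ssh_host=40.68.12.34 ip=10.0.0.4
minion-1 ansible_ssh_host=10.0.0.5
```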
@@ -20,9 +20,10 @@ BuildRequires: python2-setuptools
 BuildRequires: python-d2to1
 BuildRequires: python2-pbr

-Requires: ansible
+Requires: ansible >= 2.4.0
 Requires: python-jinja2 >= 2.10
 Requires: python-netaddr
+Requires: python-pbr

 %description
contrib/terraform/openstack/README.md

@@ -17,32 +17,33 @@ to actually install kubernetes and stand up the cluster.

 ### Networking
 The configuration includes creating a private subnet with a router to the
-external net. It will allocate floating-ips from a pool and assign them to the
+external net. It will allocate floating IPs from a pool and assign them to the
 hosts where that makes sense. You have the option of creating bastion hosts
-inside the private subnet to access the nodes there.
+inside the private subnet to access the nodes there. Alternatively, a node with
+a floating IP can be used as a jump host to nodes without.

 ### Kubernetes Nodes
 You can create many different kubernetes topologies by setting the number of
 different classes of hosts. For each class there are options for allocating
-floating ip addresses or not.
-- Master Nodes with etcd
+floating IP addresses or not.
+- Master nodes with etcd
 - Master nodes without etcd
 - Standalone etcd hosts
 - Kubernetes worker nodes

-Note that the ansible script will report an invalid configuration if you wind up
+Note that the Ansible script will report an invalid configuration if you wind up
 with an even number of etcd instances since that is not a valid configuration.

-### Gluster FS
-The terraform configuration supports provisioning of an optional GlusterFS
+### GlusterFS
+The Terraform configuration supports provisioning of an optional GlusterFS
 shared file system based on a separate set of VMs. To enable this, you need to
-specify
-- the number of gluster hosts
+specify:
+- the number of Gluster hosts (minimum 2)
 - Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks
 - Other properties related to provisioning the hosts

 Even if you are using Container Linux by CoreOS for your cluster, you will still
-need the GlusterFS VMs to be based on either Debian or RedHat based images,
+need the GlusterFS VMs to be based on either Debian or RedHat based images.
 Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through
 binaries available on hyperkube v1.4.3_coreos.0 or higher.
@@ -50,9 +51,9 @@ binaries available on hyperkube v1.4.3_coreos.0 or higher.

 - [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
 - [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
-- you already have a suitable OS image in glance
-- you already have a floating-ip pool created
-- you have security-groups enabled
+- you already have a suitable OS image in Glance
+- you already have a floating IP pool created
+- you have security groups enabled
 - you have a pair of keys generated that can be used to secure the new hosts

 ## Module Architecture
@@ -67,7 +68,7 @@ any external references to the floating IP (e.g. DNS) that would otherwise have
 to be updated.

 You can force your existing IPs by modifying the compute variables in
-`kubespray.tf` as
+`kubespray.tf` as follows:

 ```
 k8s_master_fips = ["151.101.129.67"]
@@ -75,30 +76,42 @@ k8s_node_fips = ["151.101.129.68"]
 ```

 ## Terraform
-Terraform will be used to provision all of the OpenStack resources. It is also
-used to deploy and provision the software requirements.
+Terraform will be used to provision all of the OpenStack resources with base software as appropriate.

-### Prep
+### Configuration

-#### OpenStack
+#### Inventory files

-No provider variables are hard coded inside `variables.tf` because Terraform
-supports various authentication method for OpenStack, between identity v2 and
-v3 API, `openrc` or `clouds.yaml`.
+Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):
+
+```ShellSession
+$ cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER
+$ cd inventory/$CLUSTER
+$ ln -s ../../contrib/terraform/openstack/hosts
+```
+
+This will be the base for subsequent Terraform commands.
+
+#### OpenStack access and credentials
+
+No provider variables are hardcoded inside `variables.tf` because Terraform
+supports various authentication methods for OpenStack: the older script and
+environment method (using `openrc`) as well as a newer declarative method, and
+different OpenStack environments may support Identity API version 2 or 3.

 These are examples and may vary depending on your OpenStack cloud provider,
 for an exhaustive list on how to authenticate on OpenStack with Terraform
 please read the [OpenStack provider documentation](https://www.terraform.io/docs/providers/openstack/).

-##### Recommended method : clouds.yaml
+##### Declarative method (recommended)

-Newer recommended authentication method is to use a `clouds.yaml` file that can be store in :
+The recommended authentication method is to describe credentials in a YAML file `clouds.yaml` that can be stored in:

-* `Current Directory`
+* the current directory
 * `~/.config/openstack`
 * `/etc/openstack`

-`clouds.yaml` :
+`clouds.yaml`:

 ```
 clouds:
@@ -116,18 +129,19 @@ clouds:
 ```

 If you have multiple clouds defined in your `clouds.yaml` file you can choose
-the one you want to use with the environment variable `OS_CLOUD` :
+the one you want to use with the environment variable `OS_CLOUD`:

 ```
 export OS_CLOUD=mycloud
 ```

-##### Deprecated method : openrc
+##### Openrc method (deprecated)

 When using classic environment variables, Terraform uses default `OS_*`
-environment variables :
+environment variables. A script suitable for your environment may be available
+from Horizon under *Project* -> *Compute* -> *Access & Security* -> *API Access*.

-With identity v2 :
+With identity v2:

 ```
 source openrc
@@ -144,7 +158,7 @@ OS_INTERFACE=public
 OS_IDENTITY_API_VERSION=2
 ```

-With identity v3 :
+With identity v3:

 ```
 source openrc
@@ -164,7 +178,7 @@ OS_USER_DOMAIN_NAME=Default
 ```

 Terraform does not support a mix of DomainName and DomainID, choose one or the
-other :
+other:

 ```
 * provider.openstack: You must provide exactly one of DomainID or DomainName to authenticate by Username
@@ -180,14 +194,11 @@ unset OS_PROJECT_DOMAIN_ID
 set OS_PROJECT_DOMAIN_NAME=Default
 ```

-### Terraform Variables
+#### Cluster variables
 The construction of the cluster is driven by values found in
 [variables.tf](variables.tf).

-The best way to set these values is to create a file in the project's root
-directory called something like`my-terraform-vars.tfvars`. Many of the
-variables are obvious. Here is a summary of some of the more interesting
-ones:
+For your cluster, edit `inventory/$CLUSTER/cluster.tf`.

 |Variable | Description |
 |---------|-------------|
@@ -208,9 +219,9 @@ ones:
 |`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
 | `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |

-### Terraform files
+#### Terraform state files

-In the root folder, the following files might be created (either by Terraform
+In the cluster's inventory folder, the following files might be created (either by Terraform
 or manually), to prevent you from pushing them accidentally they are in a
 `.gitignore` file in the `terraform/openstack` directory :
@@ -221,49 +232,61 @@ or manually), to prevent you from pushing them accidentally they are in a

 You can still add them manually if you want to.

-## Initializing Terraform
+### Initialization

-Before Terraform can operate on your cluster you need to install required
-plugins. This is accomplished with the command
+Before Terraform can operate on your cluster you need to install the required
+plugins. This is accomplished as follows:

-```bash
-$ terraform init contrib/terraform/openstack
+```ShellSession
+$ cd inventory/$CLUSTER
+$ terraform init ../../contrib/terraform/openstack
 ```

-## Provisioning Cluster with Terraform
-You can apply the terraform config to your cluster with the following command
-issued from the project's root directory
-```bash
-$ terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
+This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.
+
+### Provisioning cluster
+You can apply the Terraform configuration to your cluster with the following command
+issued from your cluster's inventory directory (`inventory/$CLUSTER`):
+```ShellSession
+$ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack
 ```

 if you chose to create a bastion host, this script will create
-`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for ansible to
-be able to access your machines tunneling through the bastion's ip adress. If
+`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for Ansible to
+be able to access your machines tunneling through the bastion's IP address. If
 you want to manually handle the ssh tunneling to these machines, please delete
 or move that file. If you want to use this, just leave it there, as ansible will
 pick it up automatically.

+### Destroying cluster
+You can destroy your new cluster with the following command issued from the cluster's inventory directory:
+
-## Destroying Cluster with Terraform
-You can destroy a config deployed to your cluster with the following command
-issued from the project's root directory
-```bash
-$ terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
+```ShellSession
+$ terraform destroy -var-file=cluster.tf ../../contrib/terraform/openstack
 ```

-## Debugging Cluster Provisioning
+If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
+
+* remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
+* clean up any temporary cache files: `rm /tmp/$CLUSTER-*`
+
+### Debugging
 You can enable debugging output from Terraform by setting
-`OS_DEBUG` to 1 and`TF_LOG` to`DEBUG` before runing the terraform command
+`OS_DEBUG` to 1 and `TF_LOG` to `DEBUG` before running the Terraform command.

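A debug-enabled run might then look like this (a sketch, reusing the inventory layout introduced above):

```ShellSession
$ export OS_DEBUG=1
$ export TF_LOG=DEBUG
$ terraform plan -var-file=cluster.tf ../../contrib/terraform/openstack
```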
-## Terraform output
+### Terraform output

-Terraform can output useful values that need to be reused if you want to use Kubernetes OpenStack cloud provider with Neutron/Octavia LBaaS or Cinder persistent Volume provisioning:
+Terraform can output values that are useful for configuring Neutron/Octavia LBaaS or Cinder persistent volume provisioning as part of your Kubernetes deployment:

-- `private_subnet_id`: the subnet where your instances are running, maps to `openstack_lbaas_subnet_id`
-- `floating_network_id`: the network_id where the floating IP are provisioned, maps to `openstack_lbaas_floating_network_id`
+- `private_subnet_id`: the subnet where your instances are running, used for `openstack_lbaas_subnet_id`
+- `floating_network_id`: the network_id where the floating IPs are provisioned, used for `openstack_lbaas_floating_network_id`

+## Ansible
+
+### Node access
+
+#### SSH
+
-# Running the Ansible Script
 Ensure your local ssh-agent is running and your ssh key has been added. This
 step is required by the terraform provisioner:
@@ -272,11 +295,22 @@ $ eval $(ssh-agent -s)
 $ ssh-add ~/.ssh/id_rsa
 ```

+If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file (`~/.ssh/known_hosts`).
+
-Make sure you can connect to the hosts:
+#### Bastion host

+If you are not using a bastion host, but not all of your nodes have floating IPs, create a file `inventory/$CLUSTER/group_vars/no-floating.yml` with the following content. Use one of your nodes with a floating IP (this should have been output at the end of the Terraform step) and the appropriate user for that OS, or if you have another jump host, use that.
+
 ```
-$ ansible -i contrib/terraform/openstack/hosts -m ping all
+ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@MASTER_IP"'
+```
+
+#### Test access
+
+Make sure you can connect to the hosts. Note that Container Linux by CoreOS will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.
+
+```
+$ ansible -i inventory/$CLUSTER/hosts -m ping all
 example-k8s_node-1 | SUCCESS => {
 "changed": false,
 "ping": "pong"
@@ -291,21 +325,17 @@ example-k8s-master-1 | SUCCESS => {
 }
 ```

-if you are deploying a system that needs bootstrapping, like Container Linux by
-CoreOS, these might have a state`FAILED` due to Container Linux by CoreOS not
-having python. As long as the state is not`UNREACHABLE`, this is fine.
+If it fails try to connect manually via SSH. It could be something as simple as a stale host key.

-if it fails try to connect manually via SSH ... it could be something as simple as a stale host key.
+### Configure cluster variables

-## Configure Cluster variables
-Edit `inventory/sample/group_vars/all.yml`:
-- Set variable **bootstrap_os** according selected image
+Edit `inventory/$CLUSTER/group_vars/all.yml`:
+- Set variable **bootstrap_os** appropriately for your desired image:
 ```
 # Valid bootstrap options (required): ubuntu, coreos, centos, none
 bootstrap_os: coreos
 ```
-- **bin_dir**
+- **bin_dir**:
 ```
 # Directory where the binaries will be installed
 # Default:
@@ -313,20 +343,19 @@ bootstrap_os: coreos
 # For Container Linux by CoreOS:
 bin_dir: /opt/bin
 ```
-- and **cloud_provider**
+- and **cloud_provider**:
 ```
 cloud_provider: openstack
 ```
-Edit `inventory/sample/group_vars/k8s-cluster.yml`:
-- Set variable **kube_network_plugin** according selected networking
+Edit `inventory/$CLUSTER/group_vars/k8s-cluster.yml`:
+- Set variable **kube_network_plugin** to your desired networking plugin.
+- **flannel** works out-of-the-box
+- **calico** requires [configuring OpenStack Neutron ports](/docs/openstack.md) to allow service and pod subnets
 ```
 # Choose network plugin (calico, weave or flannel)
 # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
 kube_network_plugin: flannel
 ```
-> flannel works out-of-the-box
-
-> calico requires allowing service's and pod's subnets on according OpenStack Neutron ports
 - Set variable **resolvconf_mode**
 ```
 # Can be docker_dns, host_resolvconf or none
@@ -336,18 +365,19 @@ kube_network_plugin: flannel
 resolvconf_mode: host_resolvconf
 ```

-For calico configure OpenStack Neutron ports: [OpenStack](/docs/openstack.md)
-
-## Deploy kubernetes:
+### Deploy Kubernetes

 ```
-$ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
+$ ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
 ```

-## Set up local kubectl
-1. Install kubectl on your workstation:
-[Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
-2. Add route to internal IP of master node (if needed):
+This will take some time as there are many tasks to run.
+
+## Kubernetes
+
+### Set up kubectl
+1. [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your workstation
+2. Add a route to the internal IP of a master node (if needed):
 ```
 sudo route add [master-internal-ip] gw [router-ip]
 ```
@@ -355,28 +385,28 @@ or
 ```
 sudo route add -net [internal-subnet]/24 gw [router-ip]
 ```
-3. List Kubernetes certs&keys:
+3. List Kubernetes certificates & keys:
 ```
 ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
 ```
-4. Get admin's certs&key:
+4. Get `admin`'s certificates and keys:
 ```
 ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1-key.pem > admin-key.pem
 ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1.pem > admin.pem
 ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
 ```
 5. Configure kubectl:
-```
-kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
+```ShellSession
+$ kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
 --certificate-authority=ca.pem

-kubectl config set-credentials default-admin \
+$ kubectl config set-credentials default-admin \
 --certificate-authority=ca.pem \
 --client-key=admin-key.pem \
 --client-certificate=admin.pem

-kubectl config set-context default-system --cluster=default-cluster --user=default-admin
-kubectl config use-context default-system
+$ kubectl config set-context default-system --cluster=default-cluster --user=default-admin
+$ kubectl config use-context default-system
 ```
 7. Check it:
 ```
@@ -393,14 +423,15 @@ You can tell kubectl to ignore this condition by adding the

 ## GlusterFS
 GlusterFS is not deployed by the standard `cluster.yml` playbook, see the
-[glusterfs playbook documentation](../../network-storage/glusterfs/README.md)
+[GlusterFS playbook documentation](../../network-storage/glusterfs/README.md)
 for instructions.

-Basically you will install gluster as
-```bash
-$ ansible-playbook --become -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
+Basically you will install Gluster as
+```ShellSession
+$ ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
 ```

-# What's next
-[Start Hello Kubernetes Service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/)
+## What's next
+
+Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/).
@@ -1 +1 @@
-ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"'
+ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'"
@@ -1 +0,0 @@
-../../../inventory/group_vars
@@ -1,7 +1,7 @@
 resource "openstack_networking_router_v2" "k8s" {
 name = "${var.cluster_name}-router"
 admin_state_up = "true"
-external_gateway = "${var.external_net}"
+external_network_id = "${var.external_net}"
 }

 resource "openstack_networking_network_v2" "k8s" {
@@ -2,6 +2,6 @@ output "router_id" {
 value = "${openstack_networking_router_interface_v2.k8s.id}"
 }

-output "network_id" {
+output "subnet_id" {
 value = "${openstack_networking_subnet_v2.k8s.id}"
 }
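After an apply, such values can be read back with `terraform output` (a sketch; the exact output names exposed at the root module may differ):

```ShellSession
$ terraform output private_subnet_id
$ terraform output floating_network_id
```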
contrib/terraform/openstack/sample-inventory/cluster.tf (new file, 45 lines)

@@ -0,0 +1,45 @@
+# your Kubernetes cluster name here
+cluster_name = "i-didnt-read-the-docs"
+
+# SSH key to use for access to nodes
+public_key_path = "~/.ssh/id_rsa.pub"
+
+# image to use for bastion, masters, standalone etcd instances, and nodes
+image = "<image name>"
+# user on the node (ex. core on Container Linux, ubuntu on Ubuntu, etc.)
+ssh_user = "<cloud-provisioned user>"
+
+# 0|1 bastion nodes
+number_of_bastions = 0
+#flavor_bastion = "<UUID>"
+
+# standalone etcds
+number_of_etcd = 0
+
+# masters
+number_of_k8s_masters = 1
+number_of_k8s_masters_no_etcd = 0
+number_of_k8s_masters_no_floating_ip = 0
+number_of_k8s_masters_no_floating_ip_no_etcd = 0
+flavor_k8s_master = "<UUID>"
+
+# nodes
+number_of_k8s_nodes = 2
+number_of_k8s_nodes_no_floating_ip = 4
+#flavor_k8s_node = "<UUID>"
+
+# GlusterFS
+# either 0 or more than one
+#number_of_gfs_nodes_no_floating_ip = 0
+#gfs_volume_size_in_gb = 150
+# Container Linux does not support GlusterFS
+#image_gfs = "<image name>"
+# May be different from other nodes
+#ssh_user_gfs = "ubuntu"
+#flavor_gfs_node = "<UUID>"
+
+# networking
+network_name = "<network>"
+external_net = "<UUID>"
+floatingip_pool = "<pool>"
contrib/terraform/openstack/sample-inventory/group_vars (symbolic link, new file)

@@ -0,0 +1 @@
+../../../../inventory/sample/group_vars
@@ -7,7 +7,7 @@ Note: Flannel is the only plugin that has currently been tested with atomic

 ### Vagrant

-* For bootstrapping with Vagrant, use box centos/atomic-host
+* For bootstrapping with Vagrant, use box centos/atomic-host or fedora/atomic-host
 * Update VagrantFile variable `local_release_dir` to `/var/vagrant/temp`.
 * Update `vm_memory = 2048` and `vm_cpus = 2`
 * Networking on vagrant hosts has to be brought up manually once they are booted.

@@ -17,6 +17,7 @@ Note: Flannel is the only plugin that has currently been tested with atomic
 sudo /sbin/ifup enp0s8
 ```

-* For users of vagrant-libvirt download qcow2 format from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/
+* For users of vagrant-libvirt download centos/atomic-host qcow2 format from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/
+* For users of vagrant-libvirt download fedora/atomic-host qcow2 format from https://getfedora.org/en/atomic/download/

 Then you can proceed to [cluster deployment](#run-deployment)
@@ -62,6 +62,14 @@ other queries are forwardet to the nameservers found in ``upstream_dns_servers``
 This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
 all queries.

+#### coredns
+This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for
+all queries.
+
+#### coredns_dual
+This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for
+all queries. It will also deploy a secondary CoreDNS stack.
+
 #### manual
 This does not install dnsmasq or kubedns, but allows you to specify
 `manual_dns_server`, which will be configured on nodes for handling Pod DNS.
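These modes are selected via the DNS mode variable in `group_vars/k8s-cluster.yml`; a minimal sketch, assuming `dns_mode` is the governing variable for the modes listed above:

```
dns_mode: coredns
```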
docs/getting-started.md

@@ -18,11 +18,9 @@ certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` hel

 Example inventory generator usage:

-```
 cp -r inventory/sample inventory/mycluster
 declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
 CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
-```

 Starting custom deployment
 --------------------------
@@ -30,12 +28,10 @@ Starting custom deployment
 Once you have an inventory, you may want to customize deployment data vars
 and start the deployment:

-**IMPORTANT: Edit my_inventory/groups_vars/*.yaml to override data vars**
+**IMPORTANT**: Edit my\_inventory/groups\_vars/\*.yaml to override data vars:

-```
 ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
 --private-key=~/.ssh/private_key
-```

 See more details in the [ansible guide](ansible.md).
@@ -46,16 +42,28 @@ You may want to add **worker** nodes to your existing cluster. This can be done

 - Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
 - Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:

+ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
+--private-key=~/.ssh/private_key
+
+Remove nodes
+------------
+
+You may want to remove **worker** nodes from your existing cluster. This can be done by re-running the `remove-node.yml` playbook. First, all nodes will be drained; then some Kubernetes services are stopped and some certificates deleted; finally, the kubectl command is executed to delete these nodes. This can be combined with the add-node function, and is generally helpful when doing something like autoscaling your clusters. Of course, if a node is not working, you can remove it and install it again.
+
+- Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
+- Run the ansible-playbook command, substituting `remove-node.yml`:
 ```
-ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
+ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
 --private-key=~/.ssh/private_key
 ```

 Connecting to Kubernetes
 ------------------------

 By default, Kubespray configures kube-master hosts with insecure access to
 kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
-because kubectl will use http://localhost:8080 to connect. The kubeconfig files
+because kubectl will use <http://localhost:8080> to connect. The kubeconfig files
 generated will point to localhost (on kube-masters) and kube-node hosts will
 connect either to a localhost nginx proxy or to a loadbalancer if configured.
 More details on this process are in the [HA guide](ha-mode.md).
@@ -66,7 +74,7 @@ authentication. One could generate a kubeconfig based on one installed
 kube-master hosts (needs improvement) or connect with a username and password.
 By default, a user with admin rights is created, named `kube`.
 The password can be viewed after deployment by looking at the file
-`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
+`PATH_TO_KUBESPRAY/credentials/kube_user.creds`. This contains a randomly generated
 password. If you wish to set your own password, just precreate/modify this
 file yourself.
@@ -77,29 +85,33 @@ Accessing Kubernetes Dashboard
 ------------------------------

 As of kubernetes-dashboard v1.7.x:
-* New login options that use apiserver auth proxying of token/basic/kubeconfig by default
-* Requires RBAC in authorization_modes
-* Only serves over https
-* No longer available at https://first_master:6443/ui until apiserver is updated with the https proxy URL
+
+- New login options that use apiserver auth proxying of token/basic/kubeconfig by default
+- Requires RBAC in authorization\_modes
+- Only serves over https
+- No longer available at <https://first_master:6443/ui> until apiserver is updated with the https proxy URL

 If the variable `dashboard_enabled` is set (default is true), then you can access the Kubernetes Dashboard at the following URL. You will be prompted for credentials:
-https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login
+<https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login>

 Or you can run 'kubectl proxy' from your local machine to access dashboard in your browser from:
-http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login
+<http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login>

-It is recommended to access dashboard from behind a gateway (like Ingress Controller) that enforces an authentication token. Details and other access options here: https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above
+It is recommended to access dashboard from behind a gateway (like Ingress Controller) that enforces an authentication token. Details and other access options here: <https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above>

 Accessing Kubernetes API
 ------------------------

 The main client of Kubernetes is `kubectl`. It is installed on each kube-master
 host and can optionally be configured on your ansible host by setting
-`kubeconfig_localhost: true` in the configuration. If enabled, kubectl and
-admin.conf will appear in the artifacts/ directory after deployment. You can
-see a list of nodes by running the following commands:
+`kubectl_localhost: true` and `kubeconfig_localhost: true` in the configuration:

-cd artifacts/
-./kubectl --kubeconfig admin.conf get nodes
+- If `kubectl_localhost` is enabled, `kubectl` will be downloaded to `/usr/local/bin/` and set up with bash completion. A helper script `inventory/mycluster/artifacts/kubectl.sh` is also created for use with the `admin.conf` below.
+- If `kubeconfig_localhost` is enabled, `admin.conf` will appear in the `inventory/mycluster/artifacts/` directory after deployment.

-If desired, copy kubectl to your bin dir and admin.conf to ~/.kube/config.
+You can see a list of nodes by running the following commands:
+
+cd inventory/mycluster/artifacts
+./kubectl.sh get nodes
+
+If desired, copy admin.conf to ~/.kube/config.
@@ -3,8 +3,7 @@ Large deployments of K8s

 For a large scaled deployments, consider the following configuration changes:

-* Tune [ansible settings]
-(http://docs.ansible.com/ansible/intro_configuration.html)
+* Tune [ansible settings](http://docs.ansible.com/ansible/intro_configuration.html)
 for `forks` and `timeout` vars to fit large numbers of nodes being deployed.

 * Override containers' `foo_image_repo` vars to point to intranet registry.
@@ -47,5 +46,8 @@ For a large scaled deployments, consider the following configuration changes:
 section of the Getting started guide for tips on creating a large scale
 Ansible inventory.

+* Set ``etcd_events_cluster_setup: true`` to store events in a separate
+dedicated etcd instance.
+
 For example, when deploying 200 nodes, you may want to run ansible with
 ``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.
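A concrete sketch of such a run, using the illustrative values from the text above:

```ShellSession
$ ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b \
    --forks=50 --timeout=600
```

with `retry_stagger: 60` defined in your group_vars.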
@@ -1,7 +1,7 @@
 Vagrant Install
 =================

-Assuming you have Vagrant (1.9+) installed with virtualbox (it may work
+Assuming you have Vagrant (2.0+) installed with virtualbox (it may work
 with vmware, but is untested) you should be able to launch a 3 node
 Kubernetes cluster by simply running `$ vagrant up`.<br />
docs/vars.md (13 changed lines)

@@ -63,7 +63,8 @@ following default cluster parameters:
 bits in kube_pods_subnet dictates how many kube-nodes can be in cluster.
 * *dns_setup* - Enables dnsmasq
 * *dnsmasq_dns_server* - Cluster IP for dnsmasq (default is 10.233.0.2)
-* *skydns_server* - Cluster IP for KubeDNS (default is 10.233.0.3)
+* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
+* *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
 * *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
   OpenStack (default is unset)
 * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
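A sketch of a dual-CoreDNS setup built from the variables documented above (the IPs are the stated defaults; `coredns_dual` is the mode these vars are documented for):

    dns_mode: coredns_dual               # primary plus secondary CoreDNS
    skydns_server: 10.233.0.3            # primary cluster DNS IP (default)
    skydns_server_secondary: 10.233.0.4  # secondary CoreDNS IP (default)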
@@ -117,6 +118,14 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st
 * *kubelet_cgroup_driver* - Allows manual override of the
   cgroup-driver option for Kubelet. By default autodetection is used
   to match Docker configuration.
+* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
+  For example, labels can be set in the inventory as variables or more widely in group_vars.
+  *node_labels* must be defined as a dict:
+  ```
+  node_labels:
+    label1_name: label1_value
+    label2_name: label2_value
+  ```

 ##### Custom flags for Kube Components
 For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. Example:

@@ -136,6 +145,6 @@ The possible vars are:

 By default, a user with admin rights is created, named `kube`.
 The password can be viewed after deployment by looking at the file
-`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
+`PATH_TO_KUBESPRAY/credentials/kube_user.creds`. This contains a randomly generated
 password. If you wish to set your own password, just precreate/modify this
 file yourself or change `kube_api_pwd` var.
@@ -42,7 +42,7 @@ bin_dir: /usr/local/bin
 ## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
 ## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
 ## modules.
-# kubelet_load_modules: false
+#kubelet_load_modules: false

 ## Internal network total size. This is the prefix of the
 ## entire network. Must be unused in your environment.

@@ -76,6 +76,7 @@ bin_dir: /usr/local/bin
 #azure_subnet_name:
 #azure_security_group_name:
 #azure_vnet_name:
+#azure_vnet_resource_group:
 #azure_route_table_name:

 ## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)

@@ -128,5 +129,9 @@ bin_dir: /usr/local/bin
 ## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
 #etcd_metrics: basic

+## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing.
+## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
+#etcd_memory_limit: "512M"
+
 # The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
-# kube_read_only_port: 10255
+#kube_read_only_port: 10255
@@ -1,12 +1,11 @@
 # Kubernetes configuration dirs and system namespace.
 # Those are where all the additional config stuff goes
-# the kubernetes normally puts in /srv/kubernets.
+# the kubernetes normally puts in /srv/kubernetes.
 # This puts them in a sane location and namespace.
-# Editting those values will almost surely break something.
+# Editing those values will almost surely break something.
 kube_config_dir: /etc/kubernetes
 kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system

 # This is where all the cert scripts and certs will be located
 kube_cert_dir: "{{ kube_config_dir }}/ssl"

@@ -20,7 +19,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
 kube_api_anonymous_auth: true

 ## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.9.2
+kube_version: v1.9.5

 # Where the binaries will be downloaded.
 # Note: ensure that you've enough disk space (about 1G)

@@ -29,7 +28,7 @@ local_release_dir: "/tmp/releases"
 retry_stagger: 5

 # This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
+# cert files to. Not really changeable...
 kube_cert_group: kube-cert

 # Cluster Loglevel configuration

@@ -37,7 +36,7 @@ kube_log_level: 2

 # Users to create for basic auth in Kubernetes API via HTTP
 # Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user.creds length=15 chars=ascii_letters,digits') }}"
 kube_users:
   kube:
     pass: "{{kube_api_pwd}}"

@@ -113,12 +112,15 @@ kube_apiserver_insecure_port: 8080 # (http)
 # Can be ipvs, iptables
 kube_proxy_mode: iptables

+## Encrypting Secret Data at Rest (experimental)
+kube_encrypt_secret_data: false
+
 # DNS configuration.
 # Kubernetes cluster name, also will be used as DNS domain
 cluster_name: cluster.local
 # Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
 ndots: 2
-# Can be dnsmasq_kubedns, kubedns, manual or none
+# Can be dnsmasq_kubedns, kubedns, coredns, coredns_dual, manual or none
 dns_mode: kubedns
 # Set manual server if using a custom cluster DNS server
 #manual_dns_server: 10.x.x.x

@@ -129,6 +131,7 @@ resolvconf_mode: docker_dns
 deploy_netchecker: false
 # Ip address of the kubernetes skydns service
 skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
 dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
 dns_domain: "{{ cluster_name }}"
@@ -159,19 +162,30 @@ dashboard_enabled: true
 # Monitoring apps for k8s
 efk_enabled: false

-# Helm deployment
+# Helm deployment. Needed for Prometheus Operator and k8s metrics.
 helm_enabled: false

+# Prometheus Operator. Needed for k8s metrics. Installed Helm is required.
+prometheus_operator_enabled: false
+
+# K8s cluster metrics. Installed Helm and Prometheus Operator are required.
+k8s_metrics_enabled: false
+
 # Istio deployment
 istio_enabled: false

 # Registry deployment
 registry_enabled: false
+# registry_namespace: "{{ system_namespace }}"
+# registry_storage_class: ""
+# registry_disk_size: "10Gi"

 # Local volume provisioner deployment
-# deprecated will be removed
-local_volumes_enabled: false
-local_volume_provisioner_enabled: "{{ local_volumes_enabled }}"
+local_volume_provisioner_enabled: false
+# local_volume_provisioner_namespace: "{{ system_namespace }}"
+# local_volume_provisioner_base_dir: /mnt/disks
+# local_volume_provisioner_mount_dir: /mnt/disks
+# local_volume_provisioner_storage_class: local-storage

 # CephFS provisioner deployment
 cephfs_provisioner_enabled: false

@@ -185,12 +199,30 @@ cephfs_provisioner_enabled: false
 # cephfs_provisioner_secret: secret
 # cephfs_provisioner_storage_class: cephfs

+# Nginx ingress controller deployment
+ingress_nginx_enabled: false
+# ingress_nginx_host_network: false
+# ingress_nginx_namespace: "ingress-nginx"
+# ingress_nginx_insecure_port: 80
+# ingress_nginx_secure_port: 443
+# ingress_nginx_configmap:
+#   map-hash-bucket-size: "128"
+#   ssl-protocols: "SSLv2"
+# ingress_nginx_configmap_tcp_services:
+#   9000: "default/example-go:8080"
+# ingress_nginx_configmap_udp_services:
+#   53: "kube-system/kube-dns:53"
+
+# Cert manager deployment
+cert_manager_enabled: false
+# cert_manager_namespace: "cert-manager"
+
 # Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
 persistent_volumes_enabled: false

-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
 # kubeconfig_localhost: false
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
 # kubectl_localhost: false

 # dnsmasq
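A sketch of the smallest useful deviation from the defaults above, enabling the new ingress controller (the uncommented values mirror the commented defaults shown; enabling host networking is an illustrative choice, not a recommendation made by this file):

    ingress_nginx_enabled: true
    ingress_nginx_host_network: true         # bind 80/443 directly on the nodes
    ingress_nginx_namespace: "ingress-nginx"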
@@ -26,6 +26,11 @@
 # node5
 # node6

+# [kube-ingress]
+# node2
+# node3
+
 # [k8s-cluster:children]
-# kube-node
 # kube-master
+# kube-node
+# kube-ingress
@@ -18,7 +18,9 @@ options:
     required: false
     default: null
     description:
-      - The path and filename of the resource(s) definition file.
+      - The path and filename of the resource(s) definition file(s).
+      - To operate on several files this can accept a comma separated list of files or a list of files.
+    aliases: [ 'files', 'file', 'filenames' ]
   kubectl:
     required: false
     default: null

@@ -86,6 +88,15 @@ EXAMPLES = """
 - name: test nginx is present
   kube: filename=/tmp/nginx.yml

+- name: test nginx and postgresql are present
+  kube: files=/tmp/nginx.yml,/tmp/postgresql.yml
+
+- name: test nginx and postgresql are present
+  kube:
+    files:
+      - /tmp/nginx.yml
+      - /tmp/postgresql.yml
+
 """

@@ -112,7 +123,7 @@ class KubeManager(object):
         self.all = module.params.get('all')
         self.force = module.params.get('force')
         self.name = module.params.get('name')
-        self.filename = module.params.get('filename')
+        self.filename = [f.strip() for f in module.params.get('filename') or []]
         self.resource = module.params.get('resource')
         self.label = module.params.get('label')

@@ -122,7 +133,7 @@ class KubeManager(object):
             rc, out, err = self.module.run_command(args)
             if rc != 0:
                 self.module.fail_json(
-                    msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
+                    msg='error running kubectl (%s) command (rc=%d), out=\'%s\', err=\'%s\'' % (' '.join(args), rc, out, err))
         except Exception as exc:
             self.module.fail_json(
                 msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))

@@ -147,7 +158,7 @@ class KubeManager(object):
         if not self.filename:
             self.module.fail_json(msg='filename required to create')

-        cmd.append('--filename=' + self.filename)
+        cmd.append('--filename=' + ','.join(self.filename))

         return self._execute(cmd)

@@ -161,7 +172,7 @@ class KubeManager(object):
         if not self.filename:
             self.module.fail_json(msg='filename required to reload')

-        cmd.append('--filename=' + self.filename)
+        cmd.append('--filename=' + ','.join(self.filename))

         return self._execute(cmd)

@@ -173,7 +184,7 @@ class KubeManager(object):
         cmd = ['delete']

         if self.filename:
-            cmd.append('--filename=' + self.filename)
+            cmd.append('--filename=' + ','.join(self.filename))
         else:
             if not self.resource:
                 self.module.fail_json(msg='resource required to delete without filename')

@@ -197,27 +208,31 @@ class KubeManager(object):
     def exists(self):
         cmd = ['get']

+        if self.filename:
+            cmd.append('--filename=' + ','.join(self.filename))
+        else:
-        if not self.resource:
-            return False
+            if not self.resource:
+                self.module.fail_json(msg='resource required without filename')

-        cmd.append(self.resource)
+            cmd.append(self.resource)

-        if self.name:
-            cmd.append(self.name)
+            if self.name:
+                cmd.append(self.name)

-        cmd.append('--no-headers')
-
         if self.label:
             cmd.append('--selector=' + self.label)

         if self.all:
             cmd.append('--all-namespaces')

+        cmd.append('--no-headers')
+
         result = self._execute_nofail(cmd)
         if not result:
             return False
         return True

+    # TODO: This is currently unused, perhaps convert to 'scale' with a replicas param?
     def stop(self):

         if not self.force and not self.exists():

@@ -226,7 +241,7 @@ class KubeManager(object):
         cmd = ['stop']

         if self.filename:
-            cmd.append('--filename=' + self.filename)
+            cmd.append('--filename=' + ','.join(self.filename))
         else:
             if not self.resource:
                 self.module.fail_json(msg='resource required to stop without filename')

@@ -253,7 +268,7 @@ def main():
     module = AnsibleModule(
         argument_spec=dict(
             name=dict(),
-            filename=dict(),
+            filename=dict(type='list', aliases=['files', 'file', 'filenames']),
             namespace=dict(),
             resource=dict(),
             label=dict(),

@@ -263,7 +278,8 @@ def main():
             all=dict(default=False, type='bool'),
             log_level=dict(default=0, type='int'),
             state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
-        )
+        ),
+        mutually_exclusive=[['filename', 'list']]
     )

     changed = False
remove-node.yml (new file, 29 lines)

@@ -0,0 +1,29 @@
+---
+
+- hosts: all
+  gather_facts: true
+
+- hosts: etcd:k8s-cluster:vault:calico-rr
+  vars_prompt:
+    name: "delete_nodes_confirmation"
+    prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
+    default: "no"
+    private: no
+
+  pre_tasks:
+    - name: check confirmation
+      fail:
+        msg: "Delete nodes confirmation failed"
+      when: delete_nodes_confirmation != "yes"
+
+- hosts: kube-master
+  roles:
+    - { role: remove-node/pre-remove, tags: pre-remove }
+
+- hosts: kube-node
+  roles:
+    - { role: reset, tags: reset }
+
+- hosts: kube-master
+  roles:
+    - { role: remove-node/post-remove, tags: post-remove }
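Because Ansible skips `vars_prompt` for variables that are already defined, the confirmation above can be pre-answered for unattended runs; a sketch (supplying it as an extra var is an assumption, not something this playbook documents):

    # hypothetical extra-vars payload for a non-interactive run
    delete_nodes_confirmation: "yes"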
@@ -91,7 +91,7 @@
 - name: Start Resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: dnsmasq
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
 subjects:
   - kind: ServiceAccount
     name: dnsmasq
-    namespace: "{{ system_namespace}}"
+    namespace: "kube-system"
 roleRef:
   kind: ClusterRole
   name: cluster-admin

@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: dnsmasq
-  namespace: "{{system_namespace}}"
+  namespace: "kube-system"
   labels:
     k8s-app: dnsmasq
     kubernetes.io/cluster-service: "true"

@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: dnsmasq
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     kubernetes.io/cluster-service: "true"

@@ -6,7 +6,7 @@ metadata:
     kubernetes.io/cluster-service: 'true'
     k8s-app: dnsmasq
   name: dnsmasq
-  namespace: {{system_namespace}}
+  namespace: kube-system
 spec:
   ports:
     - port: 53
@@ -1,5 +1,6 @@
 ---
 docker_version: '17.03'
+docker_selinux_version: '17.03'

 docker_package_info:
   pkgs:

@@ -10,11 +11,31 @@ docker_repo_key_info:
 docker_repo_info:
   repos:

+dockerproject_repo_key_info:
+  repo_keys:
+
+dockerproject_repo_info:
+  repos:
+
 docker_dns_servers_strict: yes

 docker_container_storage_setup: false

-docker_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7'
-docker_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
-docker_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
-docker_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg'
+# Used to override obsoletes=0
+yum_conf: /etc/yum.conf
+docker_yum_conf: /etc/yum_docker.conf
+
+# CentOS/RedHat docker-ce repo
+docker_rh_repo_base_url: 'https://download.docker.com/linux/centos/7/$basearch/stable'
+docker_rh_repo_gpgkey: 'https://download.docker.com/linux/centos/gpg'
+# Ubuntu docker-ce repo
+docker_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu"
+docker_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg'
+# Debian docker-ce repo
+docker_debian_repo_base_url: "https://download.docker.com/linux/debian"
+docker_debian_repo_gpgkey: 'https://download.docker.com/linux/debian/gpg'
+# dockerproject repo
+dockerproject_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7'
+dockerproject_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
+dockerproject_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
+dockerproject_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg'
@@ -30,7 +30,9 @@
   tags:
     - facts

-- name: ensure docker repository public key is installed
+- import_tasks: pre-upgrade.yml
+
+- name: ensure docker-ce repository public key is installed
   action: "{{ docker_repo_key_info.pkg_key }}"
   args:
     id: "{{item}}"

@@ -41,15 +43,36 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{ docker_repo_key_info.repo_keys }}"
-  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic)
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic)

-- name: ensure docker repository is enabled
+- name: ensure docker-ce repository is enabled
   action: "{{ docker_repo_info.pkg_repo }}"
   args:
     repo: "{{item}}"
     state: present
   with_items: "{{ docker_repo_info.repos }}"
-  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_repo_info.repos|length > 0)
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (docker_repo_info.repos|length > 0)

+- name: ensure docker-engine repository public key is installed
+  action: "{{ dockerproject_repo_key_info.pkg_key }}"
+  args:
+    id: "{{item}}"
+    url: "{{dockerproject_repo_key_info.url}}"
+    state: present
+  register: keyserver_task_result
+  until: keyserver_task_result|succeeded
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  with_items: "{{ dockerproject_repo_key_info.repo_keys }}"
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic)
+
+- name: ensure docker-engine repository is enabled
+  action: "{{ dockerproject_repo_info.pkg_repo }}"
+  args:
+    repo: "{{item}}"
+    state: present
+  with_items: "{{ dockerproject_repo_info.repos }}"
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (dockerproject_repo_info.repos|length > 0)
+
 - name: Configure docker repository on RedHat/CentOS
   template:

@@ -57,11 +80,27 @@
     dest: "/etc/yum.repos.d/docker.repo"
   when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic

+- name: Copy yum.conf for editing
+  copy:
+    src: "{{ yum_conf }}"
+    dest: "{{ docker_yum_conf }}"
+    remote_src: yes
+  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
+
+- name: Edit copy of yum.conf to set obsoletes=0
+  lineinfile:
+    path: "{{ docker_yum_conf }}"
+    state: present
+    regexp: '^obsoletes='
+    line: 'obsoletes=0'
+  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
+
 - name: ensure docker packages are installed
   action: "{{ docker_package_info.pkg_mgr }}"
   args:
     pkg: "{{item.name}}"
     force: "{{item.force|default(omit)}}"
+    conf_file: "{{item.yum_conf|default(omit)}}"
     state: present
   register: docker_task_result
   until: docker_task_result|succeeded
roles/docker/tasks/pre-upgrade.yml (new file, 25 lines)

@@ -0,0 +1,25 @@
+---
+- name: Ensure old versions of Docker are not installed. | Debian
+  package:
+    name: '{{ item }}'
+    state: absent
+  with_items:
+    - docker
+    - docker-engine
+  when:
+    - ansible_os_family == 'Debian'
+    - (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+
+- name: Ensure old versions of Docker are not installed. | RedHat
+  package:
+    name: '{{ item }}'
+    state: absent
+  with_items:
+    - docker
+    - docker-common
+    - docker-engine
+    - docker-selinux
+  when:
+    - ansible_os_family == 'RedHat'
+    - (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+    - not is_atomic
|
@ -3,8 +3,10 @@
|
||||||
- name: set dns server for docker
|
- name: set dns server for docker
|
||||||
set_fact:
|
set_fact:
|
||||||
docker_dns_servers: |-
|
docker_dns_servers: |-
|
||||||
{%- if dns_mode == 'kubedns' -%}
|
{%- if dns_mode in ['kubedns', 'coredns'] -%}
|
||||||
{{ [ skydns_server ] }}
|
{{ [ skydns_server ] }}
|
||||||
|
{%- elif dns_mode == 'coredns_dual' -%}
|
||||||
|
{{ [ skydns_server ] + [ skydns_server_secondary ] }}
|
||||||
{%- elif dns_mode == 'dnsmasq_kubedns' -%}
|
{%- elif dns_mode == 'dnsmasq_kubedns' -%}
|
||||||
{{ [ dnsmasq_dns_server ] }}
|
{{ [ dnsmasq_dns_server ] }}
|
||||||
{%- elif dns_mode == 'manual' -%}
|
{%- elif dns_mode == 'manual' -%}
|
||||||
|
@ -24,7 +26,7 @@
|
||||||
- name: add upstream dns servers (only when dnsmasq is not used)
|
- name: add upstream dns servers (only when dnsmasq is not used)
|
||||||
set_fact:
|
set_fact:
|
||||||
docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers|default([]) }}"
|
docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers|default([]) }}"
|
||||||
when: dns_mode == 'kubedns'
|
when: dns_mode in ['kubedns', 'coredns', 'coreos_dual']
|
||||||
|
|
||||||
- name: add global searchdomains
|
- name: add global searchdomains
|
||||||
set_fact:
|
set_fact:
|
||||||
|
|
|
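To make the new branch concrete: with the defaults documented earlier (primary DNS at 10.233.0.3, secondary at 10.233.0.4), a `coredns_dual` run would set roughly this fact (illustrative values only):

    docker_dns_servers:
      - 10.233.0.3   # skydns_server
      - 10.233.0.4   # skydns_server_secondary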
@@ -12,7 +12,7 @@
   when: http_proxy is defined or https_proxy is defined

 - name: get systemd version
-  command: systemctl --version | head -n 1 | cut -d " " -f 2
+  shell: systemctl --version | head -n 1 | cut -d " " -f 2
   register: systemd_version
   when: not is_atomic
   changed_when: false
@@ -31,7 +31,10 @@ LimitNOFILE=1048576
 LimitNPROC=1048576
 LimitCORE=infinity
 TimeoutStartSec=1min
-Restart=on-abnormal
+# restart the docker process if it exits prematurely
+Restart=on-failure
+StartLimitBurst=3
+StartLimitInterval=60s

 [Install]
 WantedBy=multi-user.target
@@ -1,7 +1,15 @@
-[dockerrepo]
-name=Docker Repository
+[docker-ce]
+name=Docker-CE Repository
 baseurl={{ docker_rh_repo_base_url }}
 enabled=1
 gpgcheck=1
 gpgkey={{ docker_rh_repo_gpgkey }}
 {% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}
+
+[docker-engine]
+name=Docker-Engine Repository
+baseurl={{ dockerproject_rh_repo_base_url }}
+enabled=1
+gpgcheck=1
+gpgkey={{ dockerproject_rh_repo_gpgkey }}
+{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}
@@ -1,15 +1,16 @@
 ---
 docker_kernel_min_version: '3.10'

+# https://download.docker.com/linux/debian/
 # https://apt.dockerproject.org/repo/dists/debian-wheezy/main/filelist
 docker_versioned_pkg:
-  'latest': docker-engine
+  'latest': docker-ce
   '1.11': docker-engine=1.11.2-0~{{ ansible_distribution_release|lower }}
   '1.12': docker-engine=1.12.6-0~debian-{{ ansible_distribution_release|lower }}
   '1.13': docker-engine=1.13.1-0~debian-{{ ansible_distribution_release|lower }}
-  '17.03': docker-engine=17.03.1~ce-0~debian-{{ ansible_distribution_release|lower }}
-  'stable': docker-engine=17.03.1~ce-0~debian-{{ ansible_distribution_release|lower }}
-  'edge': docker-engine=17.05.0~ce-0~debian-{{ ansible_distribution_release|lower }}
+  '17.03': docker-ce=17.03.2~ce-0~debian-{{ ansible_distribution_release|lower }}
+  'stable': docker-ce=17.03.2~ce-0~debian-{{ ansible_distribution_release|lower }}
+  'edge': docker-ce=17.12.1~ce-0~debian-{{ ansible_distribution_release|lower }}

 docker_package_info:
   pkg_mgr: apt

@@ -19,14 +20,28 @@ docker_package_info:

 docker_repo_key_info:
   pkg_key: apt_key
-  url: '{{ docker_apt_repo_gpgkey }}'
+  url: '{{ docker_debian_repo_gpgkey }}'
   repo_keys:
-    - 58118E89F3A912897C070ADBF76221572C52609D
+    - 9DC858229FC7DD38854AE2D88D81803C0EBFCD88

 docker_repo_info:
   pkg_repo: apt_repository
   repos:
     - >
-      deb {{ docker_apt_repo_base_url }}
+      deb {{ docker_debian_repo_base_url }}
+      {{ ansible_distribution_release|lower }}
+      stable
+
+dockerproject_repo_key_info:
+  pkg_key: apt_key
+  url: '{{ dockerproject_apt_repo_gpgkey }}'
+  repo_keys:
+    - 58118E89F3A912897C070ADBF76221572C52609D
+
+dockerproject_repo_info:
+  pkg_repo: apt_repository
+  repos:
+    - >
+      deb {{ dockerproject_apt_repo_base_url }}
       {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
       main
(deleted file)
@@ -1,17 +0,0 @@
----
-docker_kernel_min_version: '0'
-
-# versioning: docker-io itself is pinned at docker 1.5
-
-docker_package_info:
-  pkg_mgr: yum
-  pkgs:
-    - name: docker-io
-
-docker_repo_key_info:
-  pkg_key: ''
-  repo_keys: []
-
-docker_repo_info:
-  pkg_repo: ''
-  repos: []

(deleted file)
@@ -1,28 +0,0 @@
----
-docker_kernel_min_version: '0'
-
-# https://docs.docker.com/engine/installation/linux/fedora/#install-from-a-package
-# https://download.docker.com/linux/fedora/7/x86_64/stable/
-# the package names below are guesses;
-# docs mention `sudo dnf config-manager --enable docker-ce-edge` for edge
-docker_versioned_pkg:
-  'latest': docker
-  '1.11': docker-1:1.11.2
-  '1.12': docker-1:1.12.6
-  '1.13': docker-1.13.1
-  '17.03': docker-17.03.1
-  'stable': docker-ce
-  'edge': docker-ce-edge
-
-docker_package_info:
-  pkg_mgr: dnf
-  pkgs:
-    - name: "{{ docker_versioned_pkg[docker_version | string] }}"
-
-docker_repo_key_info:
-  pkg_key: ''
-  repo_keys: []
-
-docker_repo_info:
-  pkg_repo: ''
-  repos: []
@@ -1,24 +1,36 @@
 ---
 docker_kernel_min_version: '0'

-# https://yum.dockerproject.org/repo/main/centos/7/Packages/
+# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
+# https://download.docker.com/linux/centos/7/x86_64/stable/Packages/
+# https://yum.dockerproject.org/repo/main/centos/7
 # or do 'yum --showduplicates list docker-engine'
 docker_versioned_pkg:
-  'latest': docker-engine
+  'latest': docker-ce
   '1.11': docker-engine-1.11.2-1.el7.centos
   '1.12': docker-engine-1.12.6-1.el7.centos
   '1.13': docker-engine-1.13.1-1.el7.centos
-  '17.03': docker-engine-17.03.1.ce-1.el7.centos
-  'stable': docker-engine-17.03.1.ce-1.el7.centos
-  'edge': docker-engine-17.05.0.ce-1.el7.centos
+  '17.03': docker-ce-17.03.2.ce-1.el7.centos
+  'stable': docker-ce-17.03.2.ce-1.el7.centos
+  'edge': docker-ce-17.12.1.ce-1.el7.centos

+docker_selinux_versioned_pkg:
+  'latest': docker-ce-selinux
+  '1.11': docker-engine-selinux-1.11.2-1.el7.centos
+  '1.12': docker-engine-selinux-1.12.6-1.el7.centos
+  '1.13': docker-engine-selinux-1.13.1-1.el7.centos
+  '17.03': docker-ce-selinux-17.03.2.ce-1.el7.centos
+  'stable': docker-ce-selinux-17.03.2.ce-1.el7.centos
+  'edge': docker-ce-selinux-17.03.2.ce-1.el7.centos
+
-# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
-# https://download.docker.com/linux/centos/7/x86_64/stable/Packages/
-
 docker_package_info:
   pkg_mgr: yum
   pkgs:
+    - name: "{{ docker_selinux_versioned_pkg[docker_selinux_version | string] }}"
+      yum_conf: "{{ docker_yum_conf }}"
     - name: "{{ docker_versioned_pkg[docker_version | string] }}"
+      yum_conf: "{{ docker_yum_conf }}"

 docker_repo_key_info:
   pkg_key: ''
@@ -1,15 +1,15 @@
 ---
 docker_kernel_min_version: '3.10'

-# https://apt.dockerproject.org/repo/dists/ubuntu-xenial/main/filelist
+# https://download.docker.com/linux/ubuntu/
 docker_versioned_pkg:
-  'latest': docker-engine
-  '1.11': docker-engine=1.11.1-0~{{ ansible_distribution_release|lower }}
+  'latest': docker-ce
+  '1.11': docker-engine=1.11.2-0~{{ ansible_distribution_release|lower }}
   '1.12': docker-engine=1.12.6-0~ubuntu-{{ ansible_distribution_release|lower }}
   '1.13': docker-engine=1.13.1-0~ubuntu-{{ ansible_distribution_release|lower }}
-  '17.03': docker-engine=17.03.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
-  'stable': docker-engine=17.03.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
-  'edge': docker-engine=17.05.0~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
+  '17.03': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
+  'stable': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
+  'edge': docker-ce=17.12.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }}

 docker_package_info:
   pkg_mgr: apt

@@ -19,14 +19,28 @@ docker_package_info:

 docker_repo_key_info:
   pkg_key: apt_key
-  url: '{{ docker_apt_repo_gpgkey }}'
+  url: '{{ docker_ubuntu_repo_gpgkey }}'
   repo_keys:
-    - 58118E89F3A912897C070ADBF76221572C52609D
+    - 9DC858229FC7DD38854AE2D88D81803C0EBFCD88

 docker_repo_info:
   pkg_repo: apt_repository
   repos:
     - >
-      deb {{ docker_apt_repo_base_url }}
+      deb {{ docker_ubuntu_repo_base_url }}
+      {{ ansible_distribution_release|lower }}
+      stable
+
+dockerproject_repo_key_info:
+  pkg_key: apt_key
+  url: '{{ dockerproject_apt_repo_gpgkey }}'
+  repo_keys:
+    - 58118E89F3A912897C070ADBF76221572C52609D
+
+dockerproject_repo_info:
+  pkg_repo: apt_repository
+  repos:
+    - >
+      deb {{ dockerproject_apt_repo_base_url }}
       {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
       main
@@ -1,5 +1,5 @@
 ---
-local_release_dir: /tmp
+local_release_dir: /tmp/releases

 # Used to only evaluate vars from download role
 skip_downloads: false

@@ -24,24 +24,24 @@ download_always_pull: False
 download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}"

 # Versions
-kube_version: v1.9.2
+kube_version: v1.9.5
 kubeadm_version: "{{ kube_version }}"
 etcd_version: v3.2.4
 # TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
 # after migration to container download
-calico_version: "v2.6.2"
-calico_ctl_version: "v1.6.1"
-calico_cni_version: "v1.11.0"
-calico_policy_version: "v1.0.0"
-calico_rr_version: "v0.4.0"
-flannel_version: "v0.9.1"
+calico_version: "v2.6.8"
+calico_ctl_version: "v1.6.3"
+calico_cni_version: "v1.11.4"
+calico_policy_version: "v1.0.3"
+calico_rr_version: "v0.4.2"
+flannel_version: "v0.10.0"
 flannel_cni_version: "v0.3.0"
 istio_version: "0.2.6"
 vault_version: 0.8.1
-weave_version: 2.2.0
+weave_version: 2.2.1
 pod_infra_version: 3.0
 contiv_version: 1.1.7
-cilium_version: "v1.0.0-rc4"
+cilium_version: "v1.0.0-rc8"

 # Download URLs
 istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux"
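A note for anyone pinning a different `kube_version` here: the kubeadm download below is checksum-verified, so `kubeadm_checksum` has to be overridden together with it; a sketch (the version is a hypothetical pin and the checksum a placeholder, not a real digest):

    kube_version: v1.9.3        # hypothetical alternate pin
    kubeadm_checksum: "<sha256 of the matching kubeadm binary>"  # placeholder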
@@ -50,7 +50,7 @@ vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/va

 # Checksums
 istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370
-kubeadm_checksum: 560b44a2b91747f4fb64ac8754fcf65db9a39a84c6b54d4e6483400ac6c674fc
+kubeadm_checksum: 12b6e9ac1624852b7c978bde70b9bde9ca0e4fc6581d09bddfb117bb41f93c74
 vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188

 # Containers

@@ -70,8 +70,24 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
 calico_policy_image_tag: "{{ calico_policy_version }}"
 calico_rr_image_repo: "quay.io/calico/routereflector"
 calico_rr_image_tag: "{{ calico_rr_version }}"
-hyperkube_image_repo: "quay.io/coreos/hyperkube"
-hyperkube_image_tag: "{{ kube_version }}_coreos.0"
+istio_proxy_image_repo: docker.io/istio/proxy
+istio_proxy_image_tag: "{{ istio_version }}"
+istio_proxy_init_image_repo: docker.io/istio/proxy_init
+istio_proxy_init_image_tag: "{{ istio_version }}"
+istio_ca_image_repo: docker.io/istio/istio-ca
+istio_ca_image_tag: "{{ istio_version }}"
+istio_mixer_image_repo: docker.io/istio/mixer
+istio_mixer_image_tag: "{{ istio_version }}"
+istio_pilot_image_repo: docker.io/istio/pilot
+istio_pilot_image_tag: "{{ istio_version }}"
+istio_proxy_debug_image_repo: docker.io/istio/proxy_debug
+istio_proxy_debug_image_tag: "{{ istio_version }}"
+istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer
+istio_sidecar_initializer_image_tag: "{{ istio_version }}"
+istio_statsd_image_repo: prom/statsd-exporter
+istio_statsd_image_tag: latest
+hyperkube_image_repo: "gcr.io/google-containers/hyperkube"
+hyperkube_image_tag: "{{ kube_version }}"
 pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
 pod_infra_image_tag: "{{ pod_infra_version }}"
 install_socat_image_repo: "xueshanf/install-socat"

@@ -91,7 +107,6 @@ contiv_auth_proxy_image_repo: "contiv/auth_proxy"
 contiv_auth_proxy_image_tag: "{{ contiv_version }}"
 cilium_image_repo: "docker.io/cilium/cilium"
 cilium_image_tag: "{{ cilium_version }}"
-
 nginx_image_repo: nginx
 nginx_image_tag: 1.13
 dnsmasq_version: 2.78

@@ -100,6 +115,9 @@ dnsmasq_image_tag: "{{ dnsmasq_version }}"
 kubedns_version: 1.14.8
 kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
 kubedns_image_tag: "{{ kubedns_version }}"
+coredns_version: 1.1.0
+coredns_image_repo: "docker.io/coredns/coredns"
+coredns_image_tag: "{{ coredns_version }}"
 dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64"
 dnsmasq_nanny_image_tag: "{{ kubedns_version }}"
 dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64"

@@ -121,14 +139,30 @@ fluentd_image_tag: "{{ fluentd_version }}"
 kibana_version: "v4.6.1"
 kibana_image_repo: "gcr.io/google_containers/kibana"
 kibana_image_tag: "{{ kibana_version }}"
-helm_version: "v2.7.2"
+helm_version: "v2.8.1"
 helm_image_repo: "lachlanevenson/k8s-helm"
 helm_image_tag: "{{ helm_version }}"
 tiller_image_repo: "gcr.io/kubernetes-helm/tiller"
 tiller_image_tag: "{{ helm_version }}"
 vault_image_repo: "vault"
 vault_image_tag: "{{ vault_version }}"
+registry_image_repo: "registry"
+registry_image_tag: "2.6"
+registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy"
+registry_proxy_image_tag: "0.4"
+local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner"
+local_volume_provisioner_image_tag: "v2.0.0"
+cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner"
+cephfs_provisioner_image_tag: "92295a30"
+ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller"
+ingress_nginx_controller_image_tag: "0.12.0"
+ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
+ingress_nginx_default_backend_image_tag: "1.4"
+cert_manager_version: "v0.2.3"
+cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
+cert_manager_controller_image_tag: "{{ cert_manager_version }}"
+cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
+cert_manager_ingress_shim_image_tag: "{{ cert_manager_version }}"

 downloads:
   netcheck_server:
@@ -137,18 +171,24 @@ downloads:
     repo: "{{ netcheck_server_img_repo }}"
     tag: "{{ netcheck_server_tag }}"
     sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   netcheck_agent:
     enabled: "{{ deploy_netchecker }}"
     container: true
     repo: "{{ netcheck_agent_img_repo }}"
     tag: "{{ netcheck_agent_tag }}"
     sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   etcd:
     enabled: true
     container: true
     repo: "{{ etcd_image_repo }}"
     tag: "{{ etcd_image_tag }}"
     sha256: "{{ etcd_digest_checksum|default(None) }}"
+    groups:
+      - etcd
   kubeadm:
     enabled: "{{ kubeadm_enabled }}"
     file: true
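The new `groups` keys scope each download to the hosts that actually need it. A sketch of what an additional entry following the same shape might look like (the `mytool` entry is entirely hypothetical):

    mytool:                       # hypothetical download entry
      enabled: true
      container: true
      repo: "registry.example.local/mytool"
      tag: "v1"
      sha256: ""
      groups:
        - kube-master             # only pulled on kube-master hosts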
@@ -160,6 +200,8 @@ downloads:
     unarchive: false
     owner: "root"
     mode: "0755"
+    groups:
+      - k8s-cluster
   istioctl:
     enabled: "{{ istio_enabled }}"
     file: true

@@ -171,134 +213,250 @@ downloads:
     unarchive: false
     owner: "root"
     mode: "0755"
+    groups:
+      - kube-master
+  istio_proxy:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_proxy_image_repo }}"
+    tag: "{{ istio_proxy_image_tag }}"
+    sha256: "{{ istio_proxy_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_proxy_init:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_proxy_init_image_repo }}"
+    tag: "{{ istio_proxy_init_image_tag }}"
+    sha256: "{{ istio_proxy_init_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_ca:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_ca_image_repo }}"
+    tag: "{{ istio_ca_image_tag }}"
+    sha256: "{{ istio_ca_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_mixer:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_mixer_image_repo }}"
+    tag: "{{ istio_mixer_image_tag }}"
+    sha256: "{{ istio_mixer_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_pilot:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_pilot_image_repo }}"
+    tag: "{{ istio_pilot_image_tag }}"
+    sha256: "{{ istio_pilot_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_proxy_debug:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_proxy_debug_image_repo }}"
+    tag: "{{ istio_proxy_debug_image_tag }}"
+    sha256: "{{ istio_proxy_debug_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_sidecar_initializer:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_sidecar_initializer_image_repo }}"
+    tag: "{{ istio_sidecar_initializer_image_tag }}"
+    sha256: "{{ istio_sidecar_initializer_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_statsd:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_statsd_image_repo }}"
+    tag: "{{ istio_statsd_image_tag }}"
+    sha256: "{{ istio_statsd_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   hyperkube:
     enabled: true
     container: true
     repo: "{{ hyperkube_image_repo }}"
     tag: "{{ hyperkube_image_tag }}"
     sha256: "{{ hyperkube_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   cilium:
     enabled: "{{ kube_network_plugin == 'cilium' }}"
     container: true
     repo: "{{ cilium_image_repo }}"
     tag: "{{ cilium_image_tag }}"
     sha256: "{{ cilium_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   flannel:
     enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ flannel_image_repo }}"
     tag: "{{ flannel_image_tag }}"
     sha256: "{{ flannel_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   flannel_cni:
     enabled: "{{ kube_network_plugin == 'flannel' }}"
     container: true
     repo: "{{ flannel_cni_image_repo }}"
     tag: "{{ flannel_cni_image_tag }}"
     sha256: "{{ flannel_cni_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calicoctl:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ calicoctl_image_repo }}"
     tag: "{{ calicoctl_image_tag }}"
     sha256: "{{ calicoctl_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calico_node:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ calico_node_image_repo }}"
     tag: "{{ calico_node_image_tag }}"
|
tag: "{{ calico_node_image_tag }}"
|
||||||
sha256: "{{ calico_node_digest_checksum|default(None) }}"
|
sha256: "{{ calico_node_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- k8s-cluster
|
||||||
calico_cni:
|
calico_cni:
|
||||||
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
|
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ calico_cni_image_repo }}"
|
repo: "{{ calico_cni_image_repo }}"
|
||||||
tag: "{{ calico_cni_image_tag }}"
|
tag: "{{ calico_cni_image_tag }}"
|
||||||
sha256: "{{ calico_cni_digest_checksum|default(None) }}"
|
sha256: "{{ calico_cni_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- k8s-cluster
|
||||||
calico_policy:
|
calico_policy:
|
||||||
enabled: "{{ enable_network_policy or kube_network_plugin == 'canal' }}"
|
enabled: "{{ enable_network_policy or kube_network_plugin == 'canal' }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ calico_policy_image_repo }}"
|
repo: "{{ calico_policy_image_repo }}"
|
||||||
tag: "{{ calico_policy_image_tag }}"
|
tag: "{{ calico_policy_image_tag }}"
|
||||||
sha256: "{{ calico_policy_digest_checksum|default(None) }}"
|
sha256: "{{ calico_policy_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- k8s-cluster
|
||||||
calico_rr:
|
calico_rr:
|
||||||
enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr}} and kube_network_plugin == 'calico'"
|
enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr and kube_network_plugin == 'calico' }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ calico_rr_image_repo }}"
|
repo: "{{ calico_rr_image_repo }}"
|
||||||
tag: "{{ calico_rr_image_tag }}"
|
tag: "{{ calico_rr_image_tag }}"
|
||||||
sha256: "{{ calico_rr_digest_checksum|default(None) }}"
|
sha256: "{{ calico_rr_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- calico-rr
|
||||||
weave_kube:
|
weave_kube:
|
||||||
enabled: "{{ kube_network_plugin == 'weave' }}"
|
enabled: "{{ kube_network_plugin == 'weave' }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ weave_kube_image_repo }}"
|
repo: "{{ weave_kube_image_repo }}"
|
||||||
tag: "{{ weave_kube_image_tag }}"
|
tag: "{{ weave_kube_image_tag }}"
|
||||||
sha256: "{{ weave_kube_digest_checksum|default(None) }}"
|
sha256: "{{ weave_kube_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- k8s-cluster
|
||||||
weave_npc:
|
weave_npc:
|
||||||
enabled: "{{ kube_network_plugin == 'weave' }}"
|
enabled: "{{ kube_network_plugin == 'weave' }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ weave_npc_image_repo }}"
|
repo: "{{ weave_npc_image_repo }}"
|
||||||
tag: "{{ weave_npc_image_tag }}"
|
tag: "{{ weave_npc_image_tag }}"
|
||||||
sha256: "{{ weave_npc_digest_checksum|default(None) }}"
|
sha256: "{{ weave_npc_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- k8s-cluster
|
||||||
contiv:
|
contiv:
|
||||||
enabled: "{{ kube_network_plugin == 'contiv' }}"
|
enabled: "{{ kube_network_plugin == 'contiv' }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ contiv_image_repo }}"
|
repo: "{{ contiv_image_repo }}"
|
||||||
tag: "{{ contiv_image_tag }}"
|
tag: "{{ contiv_image_tag }}"
|
||||||
sha256: "{{ contiv_digest_checksum|default(None) }}"
|
sha256: "{{ contiv_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- k8s-cluster
|
||||||
contiv_auth_proxy:
|
contiv_auth_proxy:
|
||||||
enabled: "{{ kube_network_plugin == 'contiv' }}"
|
enabled: "{{ kube_network_plugin == 'contiv' }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ contiv_auth_proxy_image_repo }}"
|
repo: "{{ contiv_auth_proxy_image_repo }}"
|
||||||
tag: "{{ contiv_auth_proxy_image_tag }}"
|
tag: "{{ contiv_auth_proxy_image_tag }}"
|
||||||
sha256: "{{ contiv_auth_proxy_digest_checksum|default(None) }}"
|
sha256: "{{ contiv_auth_proxy_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- k8s-cluster
|
||||||
pod_infra:
|
pod_infra:
|
||||||
enabled: true
|
enabled: true
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ pod_infra_image_repo }}"
|
repo: "{{ pod_infra_image_repo }}"
|
||||||
tag: "{{ pod_infra_image_tag }}"
|
tag: "{{ pod_infra_image_tag }}"
|
||||||
sha256: "{{ pod_infra_digest_checksum|default(None) }}"
|
sha256: "{{ pod_infra_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- k8s-cluster
|
||||||
install_socat:
|
install_socat:
|
||||||
enabled: "{{ ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] }}"
|
enabled: "{{ ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ install_socat_image_repo }}"
|
repo: "{{ install_socat_image_repo }}"
|
||||||
tag: "{{ install_socat_image_tag }}"
|
tag: "{{ install_socat_image_tag }}"
|
||||||
sha256: "{{ install_socat_digest_checksum|default(None) }}"
|
sha256: "{{ install_socat_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- k8s-cluster
|
||||||
nginx:
|
nginx:
|
||||||
enabled: true
|
enabled: "{{ loadbalancer_apiserver_localhost }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ nginx_image_repo }}"
|
repo: "{{ nginx_image_repo }}"
|
||||||
tag: "{{ nginx_image_tag }}"
|
tag: "{{ nginx_image_tag }}"
|
||||||
sha256: "{{ nginx_digest_checksum|default(None) }}"
|
sha256: "{{ nginx_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
dnsmasq:
|
dnsmasq:
|
||||||
enabled: "{{ dns_mode == 'dnsmasq_kubedns' }}"
|
enabled: "{{ dns_mode == 'dnsmasq_kubedns' }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ dnsmasq_image_repo }}"
|
repo: "{{ dnsmasq_image_repo }}"
|
||||||
tag: "{{ dnsmasq_image_tag }}"
|
tag: "{{ dnsmasq_image_tag }}"
|
||||||
sha256: "{{ dnsmasq_digest_checksum|default(None) }}"
|
sha256: "{{ dnsmasq_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
kubedns:
|
kubedns:
|
||||||
enabled: true
|
enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ kubedns_image_repo }}"
|
repo: "{{ kubedns_image_repo }}"
|
||||||
tag: "{{ kubedns_image_tag }}"
|
tag: "{{ kubedns_image_tag }}"
|
||||||
sha256: "{{ kubedns_digest_checksum|default(None) }}"
|
sha256: "{{ kubedns_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
|
coredns:
|
||||||
|
enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
|
||||||
|
container: true
|
||||||
|
repo: "{{ coredns_image_repo }}"
|
||||||
|
tag: "{{ coredns_image_tag }}"
|
||||||
|
sha256: "{{ coredns_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
dnsmasq_nanny:
|
dnsmasq_nanny:
|
||||||
enabled: true
|
enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ dnsmasq_nanny_image_repo }}"
|
repo: "{{ dnsmasq_nanny_image_repo }}"
|
||||||
tag: "{{ dnsmasq_nanny_image_tag }}"
|
tag: "{{ dnsmasq_nanny_image_tag }}"
|
||||||
sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}"
|
sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
dnsmasq_sidecar:
|
dnsmasq_sidecar:
|
||||||
enabled: true
|
enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ dnsmasq_sidecar_image_repo }}"
|
repo: "{{ dnsmasq_sidecar_image_repo }}"
|
||||||
tag: "{{ dnsmasq_sidecar_image_tag }}"
|
tag: "{{ dnsmasq_sidecar_image_tag }}"
|
||||||
sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}"
|
sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
kubednsautoscaler:
|
kubednsautoscaler:
|
||||||
enabled: true
|
enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ kubednsautoscaler_image_repo }}"
|
repo: "{{ kubednsautoscaler_image_repo }}"
|
||||||
tag: "{{ kubednsautoscaler_image_tag }}"
|
tag: "{{ kubednsautoscaler_image_tag }}"
|
||||||
sha256: "{{ kubednsautoscaler_digest_checksum|default(None) }}"
|
sha256: "{{ kubednsautoscaler_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
testbox:
|
testbox:
|
||||||
enabled: true
|
enabled: false
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ test_image_repo }}"
|
repo: "{{ test_image_repo }}"
|
||||||
tag: "{{ test_image_tag }}"
|
tag: "{{ test_image_tag }}"
|
||||||
|
@ -309,30 +467,40 @@ downloads:
|
||||||
repo: "{{ elasticsearch_image_repo }}"
|
repo: "{{ elasticsearch_image_repo }}"
|
||||||
tag: "{{ elasticsearch_image_tag }}"
|
tag: "{{ elasticsearch_image_tag }}"
|
||||||
sha256: "{{ elasticsearch_digest_checksum|default(None) }}"
|
sha256: "{{ elasticsearch_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
fluentd:
|
fluentd:
|
||||||
enabled: "{{ efk_enabled }}"
|
enabled: "{{ efk_enabled }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ fluentd_image_repo }}"
|
repo: "{{ fluentd_image_repo }}"
|
||||||
tag: "{{ fluentd_image_tag }}"
|
tag: "{{ fluentd_image_tag }}"
|
||||||
sha256: "{{ fluentd_digest_checksum|default(None) }}"
|
sha256: "{{ fluentd_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
kibana:
|
kibana:
|
||||||
enabled: "{{ efk_enabled }}"
|
enabled: "{{ efk_enabled }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ kibana_image_repo }}"
|
repo: "{{ kibana_image_repo }}"
|
||||||
tag: "{{ kibana_image_tag }}"
|
tag: "{{ kibana_image_tag }}"
|
||||||
sha256: "{{ kibana_digest_checksum|default(None) }}"
|
sha256: "{{ kibana_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
helm:
|
helm:
|
||||||
enabled: "{{ helm_enabled }}"
|
enabled: "{{ helm_enabled }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ helm_image_repo }}"
|
repo: "{{ helm_image_repo }}"
|
||||||
tag: "{{ helm_image_tag }}"
|
tag: "{{ helm_image_tag }}"
|
||||||
sha256: "{{ helm_digest_checksum|default(None) }}"
|
sha256: "{{ helm_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
tiller:
|
tiller:
|
||||||
enabled: "{{ helm_enabled }}"
|
enabled: "{{ helm_enabled }}"
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ tiller_image_repo }}"
|
repo: "{{ tiller_image_repo }}"
|
||||||
tag: "{{ tiller_image_tag }}"
|
tag: "{{ tiller_image_tag }}"
|
||||||
sha256: "{{ tiller_digest_checksum|default(None) }}"
|
sha256: "{{ tiller_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
vault:
|
vault:
|
||||||
enabled: "{{ cert_management == 'vault' }}"
|
enabled: "{{ cert_management == 'vault' }}"
|
||||||
container: "{{ vault_deployment_type != 'host' }}"
|
container: "{{ vault_deployment_type != 'host' }}"
|
||||||
|
@ -347,6 +515,72 @@ downloads:
|
||||||
unarchive: true
|
unarchive: true
|
||||||
url: "{{ vault_download_url }}"
|
url: "{{ vault_download_url }}"
|
||||||
version: "{{ vault_version }}"
|
version: "{{ vault_version }}"
|
||||||
|
groups:
|
||||||
|
- vault
|
||||||
|
registry:
|
||||||
|
enabled: "{{ registry_enabled }}"
|
||||||
|
container: true
|
||||||
|
repo: "{{ registry_image_repo }}"
|
||||||
|
tag: "{{ registry_image_tag }}"
|
||||||
|
sha256: "{{ registry_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
|
registry_proxy:
|
||||||
|
enabled: "{{ registry_enabled }}"
|
||||||
|
container: true
|
||||||
|
repo: "{{ registry_proxy_image_repo }}"
|
||||||
|
tag: "{{ registry_proxy_image_tag }}"
|
||||||
|
sha256: "{{ registry_proxy_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
|
local_volume_provisioner:
|
||||||
|
enabled: "{{ local_volume_provisioner_enabled }}"
|
||||||
|
container: true
|
||||||
|
repo: "{{ local_volume_provisioner_image_repo }}"
|
||||||
|
tag: "{{ local_volume_provisioner_image_tag }}"
|
||||||
|
sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
|
cephfs_provisioner:
|
||||||
|
enabled: "{{ cephfs_provisioner_enabled }}"
|
||||||
|
container: true
|
||||||
|
repo: "{{ cephfs_provisioner_image_repo }}"
|
||||||
|
tag: "{{ cephfs_provisioner_image_tag }}"
|
||||||
|
sha256: "{{ cephfs_provisioner_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
|
ingress_nginx_controller:
|
||||||
|
enabled: "{{ ingress_nginx_enabled }}"
|
||||||
|
container: true
|
||||||
|
repo: "{{ ingress_nginx_controller_image_repo }}"
|
||||||
|
tag: "{{ ingress_nginx_controller_image_tag }}"
|
||||||
|
sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-ingress
|
||||||
|
ingress_nginx_default_backend:
|
||||||
|
enabled: "{{ ingress_nginx_enabled }}"
|
||||||
|
container: true
|
||||||
|
repo: "{{ ingress_nginx_default_backend_image_repo }}"
|
||||||
|
tag: "{{ ingress_nginx_default_backend_image_tag }}"
|
||||||
|
sha256: "{{ ingress_nginx_default_backend_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-ingress
|
||||||
|
cert_manager_controller:
|
||||||
|
enabled: "{{ cert_manager_enabled }}"
|
||||||
|
container: true
|
||||||
|
repo: "{{ cert_manager_controller_image_repo }}"
|
||||||
|
tag: "{{ cert_manager_controller_image_tag }}"
|
||||||
|
sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
|
cert_manager_ingress_shim:
|
||||||
|
enabled: "{{ cert_manager_enabled }}"
|
||||||
|
container: true
|
||||||
|
repo: "{{ cert_manager_ingress_shim_image_repo }}"
|
||||||
|
tag: "{{ cert_manager_ingress_shim_image_tag }}"
|
||||||
|
sha256: "{{ cert_manager_ingress_shim_digest_checksum|default(None) }}"
|
||||||
|
groups:
|
||||||
|
- kube-node
|
||||||
|
|
||||||
download_defaults:
|
download_defaults:
|
||||||
container: false
|
container: false
|
||||||
|
|
|
@@ -7,6 +7,7 @@
   when:
     - download.enabled
    - download.container
+    - group_names | intersect(download.groups) | length
   tags:
     - facts

@@ -23,6 +24,7 @@
     - download.enabled
     - download.container
     - pull_required|default(download_always_pull)
+    - group_names | intersect(download.groups) | length
   delegate_to: "{{ download_delegate }}"
   delegate_facts: yes
   run_once: yes

@@ -38,3 +40,4 @@
     - download.enabled
     - download.container
     - pull_required|default(download_always_pull)
+    - group_names | intersect(download.groups) | length
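The `group_names | intersect(download.groups) | length` condition added in the hunks above (and repeated in the download task files that follow) restricts each download item to hosts that belong to at least one of the groups listed under the item's new `groups` key: `intersect` returns the common elements of the two lists, and a non-zero length is truthy. A minimal standalone sketch of the same gate, assuming the same `download` item variable as the tasks above (hypothetical task, not part of this commit):

- name: Run only on hosts that are in one of the item's groups
  debug:
    msg: "{{ inventory_hostname }} is eligible for this download item"
  when:
    # group_names is the list of inventory groups this host belongs to
    - group_names | intersect(download.groups) | length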
@@ -13,6 +13,7 @@
   when:
     - download.enabled
     - download.file
+    - group_names | intersect(download.groups) | length

- name: file_download | Download item
  get_url:
@@ -28,6 +29,7 @@
   when:
     - download.enabled
     - download.file
+    - group_names | intersect(download.groups) | length

- name: file_download | Extract archives
  unarchive:
@@ -40,3 +42,4 @@
     - download.enabled
     - download.file
     - download.unarchive|default(False)
+    - group_names | intersect(download.groups) | length
@@ -7,6 +7,7 @@
   when:
     - download.enabled
     - download.container
+    - group_names | intersect(download.groups) | length
   tags:
     - facts

@@ -17,6 +18,7 @@
     - download.enabled
     - download.container
     - download_run_once
+    - group_names | intersect(download.groups) | length
   tags:
     - facts

@@ -27,6 +29,7 @@
     - download.enabled
     - download.container
     - download_run_once
+    - group_names | intersect(download.groups) | length

- name: "container_download | Update the 'container_changed' fact"
  set_fact:
@@ -36,6 +39,7 @@
     - download.container
     - download_run_once
     - pull_required|default(download_always_pull)
+    - group_names | intersect(download.groups) | length
   run_once: "{{ download_run_once }}"
   tags:
     - facts
@@ -53,6 +57,7 @@
     - download.enabled
     - download.container
     - download_run_once
+    - group_names | intersect(download.groups) | length
   tags:
     - facts

@@ -68,6 +73,7 @@
     - download_run_once
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost")
     - (container_changed or not img.stat.exists)
+    - group_names | intersect(download.groups) | length

- name: container_download | copy container images to ansible host
  synchronize:
@@ -87,6 +93,7 @@
     - inventory_hostname == download_delegate
     - download_delegate != "localhost"
     - saved.changed
+    - group_names | intersect(download.groups) | length

- name: container_download | upload container images to nodes
  synchronize:
@@ -108,6 +115,7 @@
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and
       inventory_hostname != download_delegate or
       download_delegate == "localhost")
+    - group_names | intersect(download.groups) | length
   tags:
     - upload
     - upgrade
@@ -120,6 +128,7 @@
     - download_run_once
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and
       inventory_hostname != download_delegate or download_delegate == "localhost")
+    - group_names | intersect(download.groups) | length
   tags:
     - upload
     - upgrade
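The run-once hunks above all hinge on the same switches: the delegate pulls each image once, saves it, and the later synchronize tasks push the archives out to the remaining nodes. A sketch of the corresponding group_vars settings (values are illustrative, not necessarily the project defaults):

download_run_once: true       # pull every image exactly once, on the delegate
download_delegate: localhost  # host that performs the single pull
download_always_pull: false   # let pull_required decide whether to re-pull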
@@ -4,6 +4,7 @@ etcd_cluster_setup: true

 etcd_backup_prefix: "/var/backups"
 etcd_data_dir: "/var/lib/etcd"
+etcd_events_data_dir: "/var/lib/etcd-events"

 etcd_config_dir: /etc/ssl/etcd
 etcd_cert_dir: "{{ etcd_config_dir }}/ssl"
@@ -11,9 +12,9 @@ etcd_cert_group: root
 # Note: This does not set up DNS entries. It simply adds the following DNS
 # entries to the certificate
 etcd_cert_alt_names:
-  - "etcd.{{ system_namespace }}.svc.{{ dns_domain }}"
-  - "etcd.{{ system_namespace }}.svc"
-  - "etcd.{{ system_namespace }}"
+  - "etcd.kube-system.svc.{{ dns_domain }}"
+  - "etcd.kube-system.svc"
+  - "etcd.kube-system"
   - "etcd"

 etcd_script_dir: "{{ bin_dir }}/etcd-scripts"
@@ -21,6 +22,13 @@ etcd_script_dir: "{{ bin_dir }}/etcd-scripts"
 etcd_heartbeat_interval: "250"
 etcd_election_timeout: "5000"

+# etcd_snapshot_count: "10000"
+
+# Parameters for ionice
+# -c takes an integer between 0 and 3 or one of the strings none, realtime, best-effort or idle.
+# -n takes an integer between 0 (highest priority) and 7 (lowest priority)
+# etcd_ionice: "-c2 -n0"
+
 etcd_metrics: "basic"

 # Limits
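Both new knobs ship commented out, so the defaults leave etcd's snapshot count and I/O scheduling class untouched; enabling them is a one-line override in inventory group_vars, for example with the values suggested by the comments above:

etcd_snapshot_count: "10000"
etcd_ionice: "-c2 -n0"   # best-effort class, highest priority within it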
@@ -65,7 +65,7 @@ if [ -e "$SSLDIR/ca-key.pem" ]; then
   cp $SSLDIR/{ca.pem,ca-key.pem} .
 else
   openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1
-  openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=etcd-ca" > /dev/null 2>&1
+  openssl req -x509 -new -nodes -key ca-key.pem -days 36500 -out ca.pem -subj "/CN=etcd-ca" > /dev/null 2>&1
 fi

 # ETCD member
@@ -75,12 +75,12 @@ if [ -n "$MASTERS" ]; then
     # Member key
     openssl genrsa -out member-${host}-key.pem 2048 > /dev/null 2>&1
     openssl req -new -key member-${host}-key.pem -out member-${host}.csr -subj "/CN=etcd-member-${cn}" -config ${CONFIG} > /dev/null 2>&1
-    openssl x509 -req -in member-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out member-${host}.pem -days 3650 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
+    openssl x509 -req -in member-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out member-${host}.pem -days 36500 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1

     # Admin key
     openssl genrsa -out admin-${host}-key.pem 2048 > /dev/null 2>&1
     openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=etcd-admin-${cn}" > /dev/null 2>&1
-    openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 3650 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
+    openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 36500 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
   done
 fi

@@ -90,7 +90,7 @@ if [ -n "$HOSTS" ]; then
     cn="${host%%.*}"
     openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1
     openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=etcd-node-${cn}" > /dev/null 2>&1
-    openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 3650 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
+    openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 36500 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
   done
 fi
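Raising -days from 3650 to 36500 only affects certificates generated from now on; anything already on disk keeps its old expiry until the script is re-run. A quick way to confirm what a host actually serves, written as a hypothetical ad-hoc task (not part of the commit, openssl flags are standard):

- name: Show etcd member certificate expiry
  command: openssl x509 -noout -enddate -in {{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem
  register: member_cert_enddate
  changed_when: false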
@@ -48,7 +48,7 @@
     snapshot save {{ etcd_backup_directory }}/snapshot.db
   environment:
     ETCDCTL_API: 3
-    ETCDCTL_CERT: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
  retries: 3
  delay: "{{ retry_stagger | random + 3 }}"
@@ -7,17 +7,33 @@
     - reload etcd
     - wait for etcd up

+- name: restart etcd-events
+  command: /bin/true
+  notify:
+    - etcd-events | reload systemd
+    - reload etcd-events
+    - wait for etcd-events up
+
 - import_tasks: backup.yml

 - name: etcd | reload systemd
   command: systemctl daemon-reload

+- name: etcd-events | reload systemd
+  command: systemctl daemon-reload
+
 - name: reload etcd
   service:
     name: etcd
     state: restarted
   when: is_etcd_master

+- name: reload etcd-events
+  service:
+    name: etcd-events
+    state: restarted
+  when: is_etcd_master
+
 - name: wait for etcd up
   uri:
     url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
@@ -29,6 +45,17 @@
   retries: 10
   delay: 5

+- name: wait for etcd-events up
+  uri:
+    url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2381/health"
+    validate_certs: no
+    client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
+    client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
+  register: result
+  until: result.status is defined and result.status == 200
+  retries: 10
+  delay: 5
+
 - name: set etcd_secret_changed
   set_fact:
     etcd_secret_changed: true
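The `command: /bin/true` handler added above is the notify-chaining idiom this role already uses for the main etcd service: the no-op handler exists only so that a single `notify: restart etcd-events` fans out, in order, to the daemon-reload, the service restart, and the health wait. A minimal sketch of the pattern with hypothetical names:

- name: restart myservice
  command: /bin/true   # no-op; its "changed" result triggers the real handlers
  notify:
    - myservice | reload systemd
    - reload myservice

- name: myservice | reload systemd
  command: systemctl daemon-reload

- name: reload myservice
  service:
    name: myservice
    state: restarted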
@@ -1,5 +1,5 @@
 ---
-- name: Configure | Check if member is in cluster
+- name: Configure | Check if member is in etcd cluster
   shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
   register: etcd_member_in_cluster
   ignore_errors: true
@@ -9,8 +9,21 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+
+- name: Configure | Check if member is in etcd-events cluster
+  shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} member list | grep -q {{ etcd_access_address }}"
+  register: etcd_events_member_in_cluster
+  ignore_errors: true
+  changed_when: false
+  check_mode: no
+  when: is_etcd_master and etcd_events_cluster_setup
+  tags:
+    - facts
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

 - name: Configure | Copy etcd.service systemd file
   template:
@@ -20,11 +33,36 @@
   when: is_etcd_master
   notify: restart etcd

-- name: Configure | Join member(s) to cluster one at a time
-  include_tasks: join_member.yml
+- name: Configure | Copy etcd-events.service systemd file
+  template:
+    src: "etcd-events-host.service.j2"
+    dest: /etc/systemd/system/etcd-events.service
+    backup: yes
+  when: is_etcd_master and etcd_deployment_type == "host" and etcd_events_cluster_setup
+  notify: restart etcd-events
+
+- name: Configure | Copy etcd-events.service systemd file
+  template:
+    src: "etcd-events-docker.service.j2"
+    dest: /etc/systemd/system/etcd-events.service
+    backup: yes
+  when: is_etcd_master and etcd_deployment_type == "docker" and etcd_events_cluster_setup
+  notify: restart etcd-events
+
+- name: Configure | Join member(s) to etcd cluster one at a time
+  include_tasks: join_etcd_member.yml
   vars:
     target_node: "{{ item }}"
   loop_control:
     pause: 10
   with_items: "{{ groups['etcd'] }}"
   when: inventory_hostname == item and etcd_member_in_cluster.rc != 0 and etcd_cluster_is_healthy.rc == 0
+
+- name: Configure | Join member(s) to etcd-events cluster one at a time
+  include_tasks: join_etcd-events_member.yml
+  vars:
+    target_node: "{{ item }}"
+  loop_control:
+    pause: 10
+  with_items: "{{ groups['etcd'] }}"
+  when: inventory_hostname == item and etcd_events_cluster_setup and etcd_events_member_in_cluster.rc != 0 and etcd_events_cluster_is_healthy.rc == 0
@@ -18,3 +18,13 @@
     mode: 0755
     backup: yes
   notify: restart etcd
+
+- name: Install etcd-events launch script
+  template:
+    src: etcd-events.j2
+    dest: "{{ bin_dir }}/etcd-events"
+    owner: 'root'
+    mode: 0755
+    backup: yes
+  when: etcd_events_cluster_setup
+  notify: restart etcd-events

47 roles/etcd/tasks/join_etcd-events_member.yml Normal file
@@ -0,0 +1,47 @@
+---
+- name: Join Member | Add member to cluster
+  shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} member add {{ etcd_member_name }} {{ etcd_events_peer_url }}"
+  register: member_add_result
+  until: member_add_result.rc == 0
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  when: target_node == inventory_hostname
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+
+- include_tasks: refresh_config.yml
+  vars:
+    etcd_events_peer_addresses: >-
+      {% for host in groups['etcd'] -%}
+        {%- if hostvars[host]['etcd_events_member_in_cluster'].rc == 0 -%}
+          {{ "etcd"+loop.index|string }}=https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2382,
+        {%- endif -%}
+        {%- if loop.last -%}
+          {{ etcd_member_name }}={{ etcd_events_peer_url }}
+        {%- endif -%}
+      {%- endfor -%}
+  when: target_node == inventory_hostname
+
+- name: Join Member | reload systemd
+  command: systemctl daemon-reload
+  when: target_node == inventory_hostname
+
+- name: Join Member | Ensure etcd-events is running
+  service:
+    name: etcd-events
+    state: started
+    enabled: yes
+  when: target_node == inventory_hostname
+
+- name: Join Member | Ensure member is in etcd-events cluster
+  shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} member list | grep -q {{ etcd_events_access_address }}"
+  register: etcd_events_member_in_cluster
+  changed_when: false
+  check_mode: no
+  tags:
+    - facts
+  when: target_node == inventory_hostname
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
47 roles/etcd/tasks/join_etcd_member.yml Normal file

@@ -0,0 +1,47 @@
+---
+- name: Join Member | Add member to cluster
+  shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
+  register: member_add_result
+  until: member_add_result.rc == 0
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  when: target_node == inventory_hostname
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+
+- include_tasks: refresh_config.yml
+  vars:
+    etcd_peer_addresses: >-
+      {% for host in groups['etcd'] -%}
+        {%- if hostvars[host]['etcd_member_in_cluster'].rc == 0 -%}
+          {{ "etcd"+loop.index|string }}=https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2380,
+        {%- endif -%}
+        {%- if loop.last -%}
+          {{ etcd_member_name }}={{ etcd_peer_url }}
+        {%- endif -%}
+      {%- endfor -%}
+  when: target_node == inventory_hostname
+
+- name: Join Member | reload systemd
+  command: systemctl daemon-reload
+  when: target_node == inventory_hostname
+
+- name: Join Member | Ensure etcd is running
+  service:
+    name: etcd
+    state: started
+    enabled: yes
+  when: target_node == inventory_hostname
+
+- name: Join Member | Ensure member is in cluster
+  shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
+  register: etcd_member_in_cluster
+  changed_when: false
+  check_mode: no
+  tags:
+    - facts
+  when: target_node == inventory_hostname
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
@@ -7,8 +7,8 @@
   delay: "{{ retry_stagger | random + 3 }}"
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

 - include_tasks: refresh_config.yml
   vars:
@@ -43,5 +43,5 @@
     - facts
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
@@ -29,13 +29,13 @@
   tags:
     - upgrade

-- import_tasks: set_cluster_health.yml
+- include_tasks: set_cluster_health.yml
   when: is_etcd_master and etcd_cluster_setup

-- import_tasks: configure.yml
+- include_tasks: configure.yml
   when: is_etcd_master and etcd_cluster_setup

-- import_tasks: refresh_config.yml
+- include_tasks: refresh_config.yml
   when: is_etcd_master and etcd_cluster_setup

 - name: Restart etcd if certs changed
@@ -43,6 +43,11 @@
   notify: restart etcd
   when: is_etcd_master and etcd_secret_changed|default(false)

+- name: Restart etcd-events if certs changed
+  command: /bin/true
+  notify: restart etcd-events
+  when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed|default(false)
+
 # reload-systemd
 - meta: flush_handlers

@@ -53,11 +58,18 @@
     enabled: yes
   when: is_etcd_master and etcd_cluster_setup

+- name: Ensure etcd-events is running
+  service:
+    name: etcd-events
+    state: started
+    enabled: yes
+  when: is_etcd_master and etcd_events_cluster_setup
+
 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`
 # state instead of `new`.
-- import_tasks: set_cluster_health.yml
+- include_tasks: set_cluster_health.yml
   when: is_etcd_master and etcd_cluster_setup

-- import_tasks: refresh_config.yml
+- include_tasks: refresh_config.yml
   when: is_etcd_master and etcd_cluster_setup
@@ -5,3 +5,10 @@
     dest: /etc/etcd.env
   notify: restart etcd
   when: is_etcd_master
+
+- name: Refresh config | Create etcd-events config file
+  template:
+    src: etcd-events.env.j2
+    dest: /etc/etcd-events.env
+  notify: restart etcd-events
+  when: is_etcd_master and etcd_events_cluster_setup
@@ -1,5 +1,5 @@
 ---
-- name: Configure | Check if cluster is healthy
+- name: Configure | Check if etcd cluster is healthy
   shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
   register: etcd_cluster_is_healthy
   ignore_errors: true
@@ -9,5 +9,18 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+
+- name: Configure | Check if etcd-events cluster is healthy
+  shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
+  register: etcd_events_cluster_is_healthy
+  ignore_errors: true
+  changed_when: false
+  check_mode: no
+  when: is_etcd_master and etcd_events_cluster_setup
+  tags:
+    - facts
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

18 roles/etcd/templates/etcd-events-docker.service.j2 Normal file
@@ -0,0 +1,18 @@
+[Unit]
+Description=etcd docker wrapper
+Wants=docker.socket
+After=docker.service
+
+[Service]
+User=root
+PermissionsStartOnly=true
+EnvironmentFile=-/etc/etcd-events.env
+ExecStart={{ bin_dir }}/etcd-events
+ExecStartPre=-{{ docker_bin_dir }}/docker rm -f {{ etcd_member_name }}-events
+ExecStop={{ docker_bin_dir }}/docker stop {{ etcd_member_name }}-events
+Restart=always
+RestartSec=15s
+TimeoutStartSec=30s
+
+[Install]
+WantedBy=multi-user.target

16 roles/etcd/templates/etcd-events-host.service.j2 Normal file
@@ -0,0 +1,16 @@
+[Unit]
+Description=etcd
+After=network.target
+
+[Service]
+Type=notify
+User=root
+EnvironmentFile=/etc/etcd-events.env
+ExecStart={{ bin_dir }}/etcd
+NotifyAccess=all
+Restart=always
+RestartSec=10s
+LimitNOFILE=40000
+
+[Install]
+WantedBy=multi-user.target

29 roles/etcd/templates/etcd-events.env.j2 Normal file
@@ -0,0 +1,29 @@
+ETCD_DATA_DIR={{ etcd_events_data_dir }}
+ETCD_ADVERTISE_CLIENT_URLS={{ etcd_events_client_url }}
+ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_events_peer_url }}
+ETCD_INITIAL_CLUSTER_STATE={% if etcd_cluster_is_healthy.rc != 0 | bool %}new{% else %}existing{% endif %}
+
+ETCD_METRICS={{ etcd_metrics }}
+ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2381,https://127.0.0.1:2381
+ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }}
+ETCD_HEARTBEAT_INTERVAL={{ etcd_heartbeat_interval }}
+ETCD_INITIAL_CLUSTER_TOKEN=k8s_events_etcd
+ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2382
+ETCD_NAME={{ etcd_member_name }}-events
+ETCD_PROXY=off
+ETCD_INITIAL_CLUSTER={{ etcd_events_peer_addresses }}
+ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }}
+{% if etcd_snapshot_count is defined %}
+ETCD_SNAPSHOT_COUNT={{ etcd_snapshot_count }}
+{% endif %}
+
+# TLS settings
+ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
+ETCD_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem
+ETCD_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem
+ETCD_CLIENT_CERT_AUTH={{ etcd_secure_client | lower}}
+
+ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
+ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem
+ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem
+ETCD_PEER_CLIENT_CERT_AUTH={{ etcd_peer_client_auth }}

21 roles/etcd/templates/etcd-events.j2 Normal file
@@ -0,0 +1,21 @@
+#!/bin/bash
+{{ docker_bin_dir }}/docker run \
+  --restart=on-failure:5 \
+  --env-file=/etc/etcd-events.env \
+  --net=host \
+  -v /etc/ssl/certs:/etc/ssl/certs:ro \
+  -v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \
+  -v {{ etcd_events_data_dir }}:{{ etcd_events_data_dir }}:rw \
+{% if etcd_memory_limit is defined %}
+  --memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \
+{% endif %}
+{% if etcd_cpu_limit is defined %}
+  --cpu-shares={{ etcd_cpu_limit|regex_replace('m', '') }} \
+{% endif %}
+{% if etcd_blkio_weight is defined %}
+  --blkio-weight={{ etcd_blkio_weight }} \
+{% endif %}
+  --name={{ etcd_member_name }}-events \
+  {{ etcd_image_repo }}:{{ etcd_image_tag }} \
+  /usr/local/bin/etcd \
+  "$@"
@@ -13,6 +13,9 @@ ETCD_NAME={{ etcd_member_name }}
 ETCD_PROXY=off
 ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}
 ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }}
+{% if etcd_snapshot_count is defined %}
+ETCD_SNAPSHOT_COUNT={{ etcd_snapshot_count }}
+{% endif %}

 # TLS settings
 ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
@@ -6,17 +6,19 @@
   -v /etc/ssl/certs:/etc/ssl/certs:ro \
   -v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \
   -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:rw \
 {% if etcd_memory_limit is defined %}
   --memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \
 {% endif %}
-  --oom-kill-disable \
 {% if etcd_cpu_limit is defined %}
   --cpu-shares={{ etcd_cpu_limit|regex_replace('m', '') }} \
 {% endif %}
 {% if etcd_blkio_weight is defined %}
   --blkio-weight={{ etcd_blkio_weight }} \
 {% endif %}
   --name={{ etcd_member_name | default("etcd") }} \
   {{ etcd_image_repo }}:{{ etcd_image_tag }} \
+{% if etcd_ionice is defined %}
+  /bin/ionice {{ etcd_ionice }} \
+{% endif %}
   /usr/local/bin/etcd \
   "$@"
@@ -1,4 +1,4 @@
-[req]
+{% set counter = {'dns': 2,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req]
 req_extensions = v3_req
 distinguished_name = req_distinguished_name

@@ -25,19 +25,18 @@ authorityKeyIdentifier=keyid:always,issuer
 [alt_names]
 DNS.1 = localhost
 {% for host in groups['etcd'] %}
-DNS.{{ 1 + loop.index }} = {{ host }}
+DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }}
 {% endfor %}
-{% if loadbalancer_apiserver is defined %}
-{% set idx = groups['etcd'] | length | int + 2 %}
-DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
+{% if apiserver_loadbalancer_domain_name is defined %}
+DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }}
 {% endif %}
-{% set idx = groups['etcd'] | length | int + 3 %}
 {% for etcd_alt_name in etcd_cert_alt_names %}
-DNS.{{ idx + 1 + loop.index }} = {{ etcd_alt_name }}
+DNS.{{ counter["dns"] }} = {{ etcd_alt_name }}{{ increment(counter, 'dns') }}
 {% endfor %}
 {% for host in groups['etcd'] %}
-IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
-IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
+{% if hostvars[host]['access_ip'] is defined %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }}
+{% endif %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }}
 {% endfor %}
-{% set idx = groups['etcd'] | length | int * 2 + 1 %}
-IP.{{ idx }} = 127.0.0.1
+IP.{{ counter["ip"] }} = 127.0.0.1
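The template now threads a mutable counter through every SAN entry instead of recomputing offsets with `set idx`. The trick: Jinja's `dict.update()` mutates the dict in place and returns none, so wrapping the call in `{% if ... %}` emits nothing while still bumping the index. A standalone Jinja2 sketch of the idiom (illustrative hostnames, not from the commit):

{% set counter = {'dns': 1} %}{% macro increment(dct, key, inc=1) %}{% if dct.update({key: dct[key] + inc}) %}{% endif %}{% endmacro %}
DNS.{{ counter["dns"] }} = first.example{{ increment(counter, 'dns') }}
DNS.{{ counter["dns"] }} = second.example{{ increment(counter, 'dns') }}
{# renders as DNS.1 = first.example and DNS.2 = second.example #}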
@@ -10,6 +10,9 @@ dns_memory_requests: 70Mi
 kubedns_min_replicas: 2
 kubedns_nodes_per_replica: 10

+# CoreDNS
+coredns_replicas: 2
+
 # Images
 kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
 kubedns_image_tag: "{{ kubedns_version }}"
@@ -41,9 +44,7 @@ netchecker_server_memory_requests: 64M
 # Dashboard
 dashboard_enabled: true
 dashboard_image_repo: gcr.io/google_containers/kubernetes-dashboard-amd64
-dashboard_image_tag: v1.8.1
-dashboard_init_image_repo: gcr.io/google_containers/kubernetes-dashboard-init-amd64
-dashboard_init_image_tag: v1.0.1
+dashboard_image_tag: v1.8.3

 # Limits for dashboard
 dashboard_cpu_limit: 100m

54 roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml Normal file
@ -0,0 +1,54 @@
+---
+- name: Kubernetes Apps | Delete old CoreDNS resources
+  kube:
+    name: "coredns"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item }}"
+    state: absent
+  with_items:
+    - 'deploy'
+    - 'configmap'
+    - 'svc'
+  tags:
+    - upgrade
+
+- name: Kubernetes Apps | Delete kubeadm CoreDNS
+  kube:
+    name: "coredns"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "deploy"
+    state: absent
+  when:
+    - kubeadm_enabled|default(false)
+    - kubeadm_init.changed|default(false)
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: Kubernetes Apps | Delete old KubeDNS resources
+  kube:
+    name: "kube-dns"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item }}"
+    state: absent
+  with_items:
+    - 'deploy'
+    - 'svc'
+  tags:
+    - upgrade
+
+- name: Kubernetes Apps | Delete kubeadm KubeDNS
+  kube:
+    name: "kube-dns"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item }}"
+    state: absent
+  with_items:
+    - 'deploy'
+    - 'svc'
+  when:
+    - kubeadm_enabled|default(false)
+    - kubeadm_init.changed|default(false)
+    - inventory_hostname == groups['kube-master'][0]
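`kube` here is Kubespray's bundled module wrapping kubectl; with `state: absent` each iteration amounts to a namespaced delete that tolerates a missing object. A rough equivalent without the wrapper (hypothetical task, not part of the commit):

    - name: Delete legacy DNS objects with plain kubectl
      command: "{{ bin_dir }}/kubectl -n kube-system delete {{ item.kind }} {{ item.name }} --ignore-not-found"
      with_items:
        - { kind: deploy, name: coredns }
        - { kind: configmap, name: coredns }
        - { kind: svc, name: coredns }
      changed_when: false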
39  roles/kubernetes-apps/ansible/tasks/coredns.yml  Normal file
@ -0,0 +1,39 @@
+---
+- name: Kubernetes Apps | Lay Down CoreDNS Template
+  template:
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
+  with_items:
+    - { name: coredns, file: coredns-config.yml, type: configmap }
+    - { name: coredns, file: coredns-sa.yml, type: sa }
+    - { name: coredns, file: coredns-deployment.yml, type: deployment }
+    - { name: coredns, file: coredns-svc.yml, type: svc }
+    - { name: coredns, file: coredns-clusterrole.yml, type: clusterrole }
+    - { name: coredns, file: coredns-clusterrolebinding.yml, type: clusterrolebinding }
+  register: coredns_manifests
+  vars:
+    clusterIP: "{{ skydns_server }}"
+  when:
+    - dns_mode in ['coredns', 'coredns_dual']
+    - inventory_hostname == groups['kube-master'][0]
+    - rbac_enabled or item.type not in rbac_resources
+  tags:
+    - coredns
+
+- name: Kubernetes Apps | Lay Down Secondary CoreDNS Template
+  template:
+    src: "{{ item.src }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
+  with_items:
+    - { name: coredns, src: coredns-deployment.yml, file: coredns-deployment-secondary.yml, type: deployment }
+    - { name: coredns, src: coredns-svc.yml, file: coredns-svc-secondary.yml, type: svc }
+  register: coredns_secondary_manifests
+  vars:
+    clusterIP: "{{ skydns_server_secondary }}"
+    coredns_ordinal_suffix: "-secondary"
+  when:
+    - dns_mode == 'coredns_dual'
+    - inventory_hostname == groups['kube-master'][0]
+    - rbac_enabled or item.type not in rbac_resources
+  tags:
+    - coredns
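In `coredns_dual` mode the same two templates are rendered a second time with `coredns_ordinal_suffix: "-secondary"` and a different `clusterIP`, yielding two fully independent CoreDNS deployments and services. A sketch of the inventory variables that drive this mode (addresses are examples, not asserted defaults):

    dns_mode: coredns_dual
    skydns_server: "10.233.0.3"            # clusterIP for the primary coredns service
    skydns_server_secondary: "10.233.0.4"  # clusterIP for coredns-secondary
    coredns_replicas: 2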
@ -22,7 +22,7 @@
 - name: Kubernetes Apps | Start dashboard
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
41  roles/kubernetes-apps/ansible/tasks/kubedns.yml  Normal file
@ -0,0 +1,41 @@
+---
+
+- name: Kubernetes Apps | Lay Down KubeDNS Template
+  template:
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
+  with_items:
+    - { name: kube-dns, file: kubedns-sa.yml, type: sa }
+    - { name: kube-dns, file: kubedns-deploy.yml, type: deployment }
+    - { name: kube-dns, file: kubedns-svc.yml, type: svc }
+    - { name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa }
+    - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole }
+    - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding }
+    - { name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment }
+  register: kubedns_manifests
+  when:
+    - dns_mode in ['kubedns','dnsmasq_kubedns']
+    - inventory_hostname == groups['kube-master'][0]
+    - rbac_enabled or item.type not in rbac_resources
+  tags:
+    - dnsmasq
+
+# see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns
+- name: Kubernetes Apps | Patch system:kube-dns ClusterRole
+  command: >
+    {{ bin_dir }}/kubectl patch clusterrole system:kube-dns
+    --patch='{
+      "rules": [
+        {
+          "apiGroups" : [""],
+          "resources" : ["endpoints", "services"],
+          "verbs": ["list", "watch", "get"]
+        }
+      ]
+    }'
+  when:
+    - dns_mode in ['kubedns', 'dnsmasq_kubedns']
+    - inventory_hostname == groups['kube-master'][0]
+    - rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True)
+  tags:
+    - dnsmasq
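A detail worth noting in the patch task: `command: >` is a YAML folded block scalar, so the multi-line JSON is joined into one line and kubectl receives the whole `--patch=...` as a single argument. The `kubedns_version|version_compare("1.11.0", "<")` guard confines the workaround to the "old" kube-dns images the linked upstream issue applies to.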
@ -11,82 +11,49 @@
     delay: 2
   when: inventory_hostname == groups['kube-master'][0]
 
-- name: Kubernetes Apps | Delete old kubedns resources
-  kube:
-    name: "kubedns"
-    namespace: "{{ system_namespace }}"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item }}"
-    state: absent
-  with_items:
-    - 'deploy'
-    - 'svc'
+- name: Kubernetes Apps | Cleanup DNS
+  import_tasks: tasks/cleanup_dns.yml
+  when:
+    - inventory_hostname == groups['kube-master'][0]
   tags:
     - upgrade
 
-- name: Kubernetes Apps | Delete kubeadm kubedns
-  kube:
-    name: "kubedns"
-    namespace: "{{ system_namespace }}"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "deploy"
-    state: absent
+- name: Kubernetes Apps | CoreDNS
+  import_tasks: "tasks/coredns.yml"
   when:
-    - kubeadm_enabled|default(false)
-    - kubeadm_init.changed|default(false)
+    - dns_mode in ['coredns', 'coredns_dual']
     - inventory_hostname == groups['kube-master'][0]
-
-- name: Kubernetes Apps | Lay Down KubeDNS Template
-  template:
-    src: "{{ item.file }}.j2"
-    dest: "{{ kube_config_dir }}/{{ item.file }}"
-  with_items:
-    - { name: kube-dns, file: kubedns-sa.yml, type: sa }
-    - { name: kube-dns, file: kubedns-deploy.yml, type: deployment }
-    - { name: kube-dns, file: kubedns-svc.yml, type: svc }
-    - { name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa }
-    - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole }
-    - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding }
-    - { name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment }
-  register: manifests
-  when:
-    - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
-    - rbac_enabled or item.type not in rbac_resources
   tags:
-    - dnsmasq
+    - coredns
 
-# see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns
-- name: Kubernetes Apps | Patch system:kube-dns ClusterRole
-  command: >
-    {{ bin_dir }}/kubectl patch clusterrole system:kube-dns
-    --patch='{
-      "rules": [
-        {
-          "apiGroups" : [""],
-          "resources" : ["endpoints", "services"],
-          "verbs": ["list", "watch", "get"]
-        }
-      ]
-    }'
+- name: Kubernetes Apps | KubeDNS
+  import_tasks: "tasks/kubedns.yml"
   when:
-    - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
-    - rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True)
+    - dns_mode in ['kubedns', 'dnsmasq_kubedns']
+    - inventory_hostname == groups['kube-master'][0]
   tags:
     - dnsmasq
 
 - name: Kubernetes Apps | Start Resources
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
-  with_items: "{{ manifests.results }}"
+  with_items:
+    - "{{ kubedns_manifests.results | default({}) }}"
+    - "{{ coredns_manifests.results | default({}) }}"
+    - "{{ coredns_secondary_manifests.results | default({}) }}"
   when:
     - dns_mode != 'none'
     - inventory_hostname == groups['kube-master'][0]
     - not item|skipped
+  register: resource_result
+  until: resource_result|succeeded
+  retries: 4
+  delay: 5
   tags:
     - dnsmasq
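The refactor replaces the inline task lists with `import_tasks`, which is static: conditions attached to the import are copied onto every task in the imported file, so the per-mode `when:` guards above are evaluated for each imported task rather than once. Minimal sketch of the semantics (hypothetical file name):

    # every task inside dns.yml inherits this when: condition
    - name: DNS tasks
      import_tasks: dns.yml
      when: dns_mode != 'none'

The Start Resources task then fans in all three possible registered result lists, relying on `default({})` and `not item|skipped` so the loop is harmless for whichever DNS flavors did not run, and retries each apply up to four times.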
@ -0,0 +1,19 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+    addonmanager.kubernetes.io/mode: Reconcile
+  name: system:coredns
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - endpoints
+      - services
+      - pods
+      - namespaces
+    verbs:
+      - list
+      - watch
@ -0,0 +1,18 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  annotations:
+    rbac.authorization.kubernetes.io/autoupdate: "true"
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+    addonmanager.kubernetes.io/mode: EnsureExists
+  name: system:coredns
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:coredns
+subjects:
+- kind: ServiceAccount
+  name: coredns
+  namespace: kube-system
@ -0,0 +1,22 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: coredns
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+data:
+  Corefile: |
+    .:53 {
+        errors
+        health
+        kubernetes {{ cluster_name }} in-addr.arpa ip6.arpa {
+          pods insecure
+          upstream /etc/resolv.conf
+          fallthrough in-addr.arpa ip6.arpa
+        }
+        prometheus :9153
+        proxy . /etc/resolv.conf
+        cache 30
+    }
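The Corefile is a single server block on port 53, and its plugin chain reads top to bottom: `kubernetes` answers names under `{{ cluster_name }}` plus the reverse zones (`fallthrough` hands back reverse lookups it does not own), `proxy . /etc/resolv.conf` forwards everything else to the node's upstream resolvers, `prometheus :9153` exposes metrics, and `cache 30` caches answers for 30 seconds. An illustrative variant, not part of the commit, enabling per-query logging via the upstream `log` plugin:

    data:
      Corefile: |
        .:53 {
            errors
            log
            kubernetes cluster.local in-addr.arpa ip6.arpa {
              pods insecure
            }
            proxy . /etc/resolv.conf
            cache 30
        }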
@ -0,0 +1,81 @@
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: coredns{{ coredns_ordinal_suffix | default('') }}
+  namespace: kube-system
+  labels:
+    k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+    kubernetes.io/name: "CoreDNS"
+spec:
+  replicas: {{ coredns_replicas }}
+  strategy:
+    type: RollingUpdate
+    rollingUpdate:
+      maxUnavailable: 0
+      maxSurge: 10%
+  selector:
+    matchLabels:
+      k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+  template:
+    metadata:
+      labels:
+        k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+{% if rbac_enabled %}
+      serviceAccountName: coredns
+{% endif %}
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+        - key: "CriticalAddonsOnly"
+          operator: "Exists"
+      containers:
+      - name: coredns
+        image: "{{ coredns_image_repo }}:{{ coredns_image_tag }}"
+        imagePullPolicy: {{ k8s_image_pull_policy }}
+        resources:
+          # TODO: Set memory limits when we've profiled the container for large
+          # clusters, then set request = limit to keep this container in
+          # guaranteed class. Currently, this container falls into the
+          # "burstable" category so the kubelet doesn't backoff from restarting it.
+          limits:
+            memory: {{ dns_memory_limit }}
+          requests:
+            cpu: {{ dns_cpu_requests }}
+            memory: {{ dns_memory_requests }}
+        args: [ "-conf", "/etc/coredns/Corefile" ]
+        volumeMounts:
+        - name: config-volume
+          mountPath: /etc/coredns
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+        - containerPort: 9153
+          name: metrics
+          protocol: TCP
+        livenessProbe:
+          httpGet:
+            path: /health
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+      dnsPolicy: Default
+      volumes:
+        - name: config-volume
+          configMap:
+            name: coredns
+            items:
+            - key: Corefile
+              path: Corefile
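The update strategy is chosen to keep DNS serving during upgrades: `maxUnavailable: 0` means no old pod is killed before its replacement is Ready, and `maxSurge: 10%` of the default 2 replicas rounds up to 1 extra pod, so a rollout steps 2 → 3 → 2. The tolerations let the pods land on masters and on nodes reserved for critical addons, and `dnsPolicy: Default` makes CoreDNS itself resolve through the node's resolv.conf rather than through itself.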
@ -0,0 +1,9 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: coredns
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
22  roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2  Normal file
@ -0,0 +1,22 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: coredns{{ coredns_ordinal_suffix | default('') }}
+  namespace: kube-system
+  labels:
+    k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+    kubernetes.io/name: "CoreDNS"
+spec:
+  selector:
+    k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+  clusterIP: {{ clusterIP }}
+  ports:
+    - name: dns
+      port: 53
+      protocol: UDP
+    - name: dns-tcp
+      port: 53
+      protocol: TCP
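Because the Service name, labels, and selector all carry `coredns_ordinal_suffix`, the primary and secondary Services select disjoint pod sets while sharing one template; `clusterIP` is injected by the task's `vars:` (`skydns_server` or `skydns_server_secondary`), which is presumably what kubelets hand to pods as their cluster DNS server(s).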
@ -25,7 +25,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard-certs
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 type: Opaque
 
 ---
@ -37,7 +37,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 
 ---
 # ------------------- Dashboard Role & Role Binding ------------------- #
@ -46,7 +46,7 @@ kind: Role
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: kubernetes-dashboard-minimal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
 - apiGroups: [""]
@ -81,7 +81,7 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   name: kubernetes-dashboard-minimal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
@ -89,7 +89,7 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 
 ---
 # ------------------- Gross Hack For anonymous auth through api proxy ------------------- #
@ -103,7 +103,7 @@ rules:
   resources: ["services/proxy"]
   resourceNames: ["https:kubernetes-dashboard:"]
   verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
-- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/{{ system_namespace }}/services/https:kubernetes-dashboard:/proxy/*"]
+- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/*"]
   verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
 
 ---
@ -128,7 +128,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 spec:
   replicas: 1
   revisionHistoryLimit: 10
@ -200,7 +200,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 spec:
   ports:
   - port: 443
@ -17,7 +17,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups: [""]
     resources: ["nodes"]
@ -17,11 +17,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: cluster-proportional-autoscaler
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-proportional-autoscaler
@ -17,4 +17,4 @@ kind: ServiceAccount
 apiVersion: v1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
@ -17,7 +17,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kubedns-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: kubedns-autoscaler
     kubernetes.io/cluster-service: "true"
 
@ -40,7 +40,7 @@ spec:
         memory: "10Mi"
       command:
         - /cluster-proportional-autoscaler
-        - --namespace={{ system_namespace }}
+        - --namespace=kube-system
         - --configmap=kubedns-autoscaler
         # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
        - --target=Deployment/kube-dns
@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kube-dns
-  namespace: "{{system_namespace}}"
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: kube-dns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kube-dns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
@ -16,11 +16,13 @@
     src: "node-crb.yml.j2"
     dest: "{{ kube_config_dir }}/node-crb.yml"
   register: node_crb_manifest
-  when: rbac_enabled
+  when:
+    - rbac_enabled
+    - inventory_hostname == groups['kube-master'][0]
 
 - name: Apply workaround to allow all nodes with cert O=system:nodes to register
   kube:
-    name: "system:node"
+    name: "kubespray:system:node"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "clusterrolebinding"
     filename: "{{ kube_config_dir }}/node-crb.yml"
@ -28,32 +30,101 @@
   when:
     - rbac_enabled
     - node_crb_manifest.changed
+    - inventory_hostname == groups['kube-master'][0]
 
-# This is not a cluster role, but should be run after kubeconfig is set on master
-- name: Write kube system namespace manifest
+- name: Kubernetes Apps | Add webhook ClusterRole that grants access to proxy, stats, log, spec, and metrics on a kubelet
   template:
-    src: namespace.j2
-    dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  when: inventory_hostname == groups['kube-master'][0]
-  tags:
-    - apps
+    src: "node-webhook-cr.yml.j2"
+    dest: "{{ kube_config_dir }}/node-webhook-cr.yml"
+  register: node_webhook_cr_manifest
+  when:
+    - rbac_enabled
+    - kubelet_authorization_mode_webhook
+    - inventory_hostname == groups['kube-master'][0]
+  tags: node-webhook
 
-- name: Check if kube system namespace exists
-  command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}"
-  register: 'kubesystem'
-  changed_when: False
-  failed_when: False
-  when: inventory_hostname == groups['kube-master'][0]
-  tags:
-    - apps
+- name: Apply webhook ClusterRole
+  kube:
+    name: "system:node-webhook"
+    kubectl: "{{bin_dir}}/kubectl"
+    resource: "clusterrole"
+    filename: "{{ kube_config_dir }}/node-webhook-cr.yml"
+    state: latest
+  when:
+    - rbac_enabled
+    - kubelet_authorization_mode_webhook
+    - node_webhook_cr_manifest.changed
+    - inventory_hostname == groups['kube-master'][0]
+  tags: node-webhook
 
-- name: Create kube system namespace
-  command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  register: create_system_ns
-  until: create_system_ns.rc == 0
-  changed_when: False
-  when: inventory_hostname == groups['kube-master'][0] and kubesystem.rc != 0
-  tags:
-    - apps
+- name: Kubernetes Apps | Add ClusterRoleBinding for system:nodes to webhook ClusterRole
+  template:
+    src: "node-webhook-crb.yml.j2"
+    dest: "{{ kube_config_dir }}/node-webhook-crb.yml"
+  register: node_webhook_crb_manifest
+  when:
+    - rbac_enabled
+    - kubelet_authorization_mode_webhook
+    - inventory_hostname == groups['kube-master'][0]
+  tags: node-webhook
+
+- name: Grant system:nodes the webhook ClusterRole
+  kube:
+    name: "system:node-webhook"
+    kubectl: "{{bin_dir}}/kubectl"
+    resource: "clusterrolebinding"
+    filename: "{{ kube_config_dir }}/node-webhook-crb.yml"
+    state: latest
+  when:
+    - rbac_enabled
+    - kubelet_authorization_mode_webhook
+    - node_webhook_crb_manifest.changed
+    - inventory_hostname == groups['kube-master'][0]
+  tags: node-webhook
+
+- name: Check if vsphere-cloud-provider ClusterRole exists
+  command: "{{ bin_dir }}/kubectl get clusterroles system:vsphere-cloud-provider"
+  register: vsphere_cloud_provider
+  ignore_errors: true
+  when:
+    - rbac_enabled
+    - cloud_provider is defined
+    - cloud_provider == 'vsphere'
+    - kube_version | version_compare('v1.9.0', '>=')
+    - kube_version | version_compare('v1.9.3', '<=')
+    - inventory_hostname == groups['kube-master'][0]
+  tags: vsphere
+
+- name: Write vsphere-cloud-provider ClusterRole manifest
+  template:
+    src: "vsphere-rbac.yml.j2"
+    dest: "{{ kube_config_dir }}/vsphere-rbac.yml"
+  register: vsphere_rbac_manifest
+  when:
+    - rbac_enabled
+    - cloud_provider is defined
+    - cloud_provider == 'vsphere'
+    - vsphere_cloud_provider.rc is defined
+    - vsphere_cloud_provider.rc != 0
+    - kube_version | version_compare('v1.9.0', '>=')
+    - kube_version | version_compare('v1.9.3', '<=')
+    - inventory_hostname == groups['kube-master'][0]
+  tags: vsphere
+
+- name: Apply vsphere-cloud-provider ClusterRole
+  kube:
+    name: "system:vsphere-cloud-provider"
+    kubectl: "{{bin_dir}}/kubectl"
+    resource: "clusterrolebinding"
+    filename: "{{ kube_config_dir }}/vsphere-rbac.yml"
+    state: latest
+  when:
+    - rbac_enabled
+    - cloud_provider is defined
+    - cloud_provider == 'vsphere'
+    - vsphere_cloud_provider.rc is defined
+    - vsphere_cloud_provider.rc != 0
+    - kube_version | version_compare('v1.9.0', '>=')
+    - kube_version | version_compare('v1.9.3', '<=')
+    - inventory_hostname == groups['kube-master'][0]
+  tags: vsphere
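The node-webhook tasks only fire when `kubelet_authorization_mode_webhook` is set: in that mode the kubelet authorizes requests to its own API by asking the apiserver (via SubjectAccessReview) whether the caller may touch the node's proxy, stats, log, spec, and metrics subresources, and the new ClusterRole/ClusterRoleBinding pair grants the `system:nodes` group those permissions so node-originated calls keep working. Group_vars sketch of the switch these tasks key on (variable name as used above):

    kubelet_authorization_mode_webhook: true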
@ -1,4 +1,4 @@
 apiVersion: v1
 kind: Namespace
 metadata:
-  name: "{{system_namespace}}"
+  name: "kube-system"
@ -6,7 +6,7 @@ metadata:
     rbac.authorization.kubernetes.io/autoupdate: "true"
   labels:
     kubernetes.io/bootstrapping: rbac-defaults
-  name: system:node
+  name: kubespray:system:node
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@ -0,0 +1,20 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  annotations:
+    rbac.authorization.kubernetes.io/autoupdate: "true"
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+  name: system:node-webhook
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - nodes/proxy
+      - nodes/stats
+      - nodes/log
+      - nodes/spec
+      - nodes/metrics
+    verbs:
+      - "*"
@ -0,0 +1,17 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  annotations:
+    rbac.authorization.kubernetes.io/autoupdate: "true"
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+  name: system:node-webhook
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:node-webhook
+subjects:
+  - apiGroup: rbac.authorization.k8s.io
+    kind: Group
+    name: system:nodes
@ -0,0 +1,35 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: system:vsphere-cloud-provider
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - create
+      - patch
+      - update
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: system:vsphere-cloud-provider
+roleRef:
+  kind: ClusterRole
+  name: system:vsphere-cloud-provider
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+  - kind: ServiceAccount
+    name: vsphere-cloud-provider
+    namespace: kube-system
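The vSphere tasks are deliberately narrow: they only run for `cloud_provider == 'vsphere'` on Kubernetes v1.9.0 through v1.9.3 (checked with two `version_compare` filters), and only if `kubectl get clusterroles system:vsphere-cloud-provider` failed, i.e. the role does not already exist. Inventory sketch of a cluster that would take this path (values illustrative):

    cloud_provider: vsphere
    kube_version: v1.9.2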
@ -10,7 +10,7 @@
   when: rbac_enabled
 
 - name: "ElasticSearch | Create Serviceaccount and Clusterrolebinding (RBAC)"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n kube-system"
   with_items:
     - "efk-sa.yml"
     - "efk-clusterrolebinding.yml"
 
@ -24,7 +24,7 @@
   register: es_deployment_manifest
 
 - name: "ElasticSearch | Create ES deployment"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n kube-system"
   run_once: true
   when: es_deployment_manifest.changed
 
@ -35,6 +35,6 @@
   register: es_service_manifest
 
 - name: "ElasticSearch | Create ES service"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n kube-system"
   run_once: true
   when: es_service_manifest.changed
@ -3,11 +3,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: efk
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: efk
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-admin
@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: efk
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: elasticsearch-logging-v1
-  namespace: "{{ system_namespace }}"
+  namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
     version: "{{ elasticsearch_image_tag }}"
@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: elasticsearch-logging
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: elasticsearch-logging
     kubernetes.io/cluster-service: "true"
@ -17,6 +17,6 @@
   register: fluentd_ds_manifest
 
 - name: "Fluentd | Create fluentd daemonset"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n kube-system"
   run_once: true
   when: fluentd_ds_manifest.changed
@ -2,7 +2,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: fluentd-config
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
 data:
   {{ fluentd_config_file }}: |
     # This configuration file for Fluentd / td-agent is used
@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: "fluentd-es-v{{ fluentd_version }}"
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: fluentd-es
     kubernetes.io/cluster-service: "true"
Some files were not shown because too many files have changed in this diff.