commit 4c81cd2a71
Author: woopstar, 2018-05-02 13:37:15 +02:00 (committed by Andreas Kruger)
104 changed files with 663 additions and 294 deletions

.gitignore

@@ -1,6 +1,7 @@
 .vagrant
 *.retry
-inventory/vagrant_ansible_inventory
+**/vagrant_ansible_inventory
+inventory/credentials/
 inventory/group_vars/fake_hosts.yml
 inventory/host_vars/
 temp


@@ -20,6 +20,7 @@ variables:
   GCE_PREEMPTIBLE: "false"
   ANSIBLE_KEEP_REMOTE_FILES: "1"
   ANSIBLE_CONFIG: ./tests/ansible.cfg
+  ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
   IDEMPOT_CHECK: "false"
   RESET_CHECK: "false"
   UPGRADE_TEST: "false"
@@ -90,9 +91,9 @@ before_script:
   - cd tests && make create-${CI_PLATFORM} -s ; cd -
   # Check out latest tag if testing upgrade
-  # Uncomment when gitlab kargo repo has tags
+  # Uncomment when gitlab kubespray repo has tags
   #- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
-  - test "${UPGRADE_TEST}" != "false" && git checkout ba0a03a8ba2d97a73d06242ec4bb3c7e2012e58c
+  - test "${UPGRADE_TEST}" != "false" && git checkout f7d52564aad2ff8e337634951beb4a881c0e8aa6
   # Checkout the CI vars file so it is available
   - test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
   # Workaround https://github.com/kubernetes-incubator/kubespray/issues/2021
@@ -102,14 +103,13 @@ before_script:
   # Create cluster
   - >
     ansible-playbook
-    -i inventory/sample/hosts.ini
+    -i ${ANSIBLE_INVENTORY}
    -b --become-user=root
    --private-key=${HOME}/.ssh/id_rsa
    -u $SSH_USER
    ${SSH_ARGS}
    ${LOG_LEVEL}
    -e @${CI_TEST_VARS}
-    -e ansible_python_interpreter=${PYPATH}
    -e ansible_ssh_user=${SSH_USER}
    -e local_release_dir=${PWD}/downloads
    --limit "all:!fake_hosts"
@@ -122,14 +122,13 @@ before_script:
    test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
    git checkout "${CI_BUILD_REF}";
    ansible-playbook
-    -i inventory/sample/hosts.ini
+    -i ${ANSIBLE_INVENTORY}
    -b --become-user=root
    --private-key=${HOME}/.ssh/id_rsa
    -u $SSH_USER
    ${SSH_ARGS}
    ${LOG_LEVEL}
    -e @${CI_TEST_VARS}
-    -e ansible_python_interpreter=${PYPATH}
    -e ansible_ssh_user=${SSH_USER}
    -e local_release_dir=${PWD}/downloads
    --limit "all:!fake_hosts"
@@ -139,20 +138,20 @@ before_script:
   # Tests Cases
   ## Test Master API
   - >
-    ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
+    ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
     -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"

   ## Ping between 2 pods
-  - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
+  - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL

   ## Advanced DNS checks
-  - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
+  - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL

   ## Idempotency checks 1/5 (repeat deployment)
   - >
     if [ "${IDEMPOT_CHECK}" = "true" ]; then
       ansible-playbook
-      -i inventory/sample/hosts.ini
+      -i ${ANSIBLE_INVENTORY}
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
@@ -169,7 +168,7 @@ before_script:
   - >
     if [ "${IDEMPOT_CHECK}" = "true" ]; then
       ansible-playbook
-      -i inventory/sample/hosts.ini
+      -i ${ANSIBLE_INVENTORY}
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
@@ -184,7 +183,7 @@ before_script:
   - >
     if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
       ansible-playbook
-      -i inventory/sample/hosts.ini
+      -i ${ANSIBLE_INVENTORY}
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
@@ -201,7 +200,7 @@ before_script:
   - >
     if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
       ansible-playbook
-      -i inventory/sample/hosts.ini
+      -i ${ANSIBLE_INVENTORY}
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
@@ -217,7 +216,7 @@ before_script:
   ## Idempotency checks 5/5 (Advanced DNS checks)
   - >
     if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
-      ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH}
+      ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH}
       -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
       --limit "all:!fake_hosts"
       tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
@ -309,6 +308,10 @@ before_script:
# stage: deploy-special # stage: deploy-special
MOVED_TO_GROUP_VARS: "true" MOVED_TO_GROUP_VARS: "true"
.opensuse_canal_variables: &opensuse_canal_variables
# stage: deploy-part2
MOVED_TO_GROUP_VARS: "true"
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto) # Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
### PR JOBS PART1 ### PR JOBS PART1
@@ -590,6 +593,17 @@ gce_centos7-calico-ha-triggers:
   when: on_success
   only: ['triggers']

+gce_opensuse-canal:
+  stage: deploy-part2
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *gce_variables
+    <<: *opensuse_canal_variables
+  when: manual
+  except: ['triggers']
+  only: ['master', /^pr-.*$/]
+
 # no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
 gce_coreos-alpha-weave-ha:
   stage: deploy-special


@@ -1,11 +1,11 @@
-![Kubernetes Logo](https://s28.postimg.org/lf3q4ocpp/k8s.png)
+![Kubernetes Logo](https://raw.githubusercontent.com/kubernetes-incubator/kubespray/master/docs/img/kubernetes-logo.png)

 Deploy a Production Ready Kubernetes Cluster
 ============================================

 If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.

--   Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
+-   Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal**
 -   **High available** cluster
 -   **Composable** (Choice of the network plugin for instance)
 -   Support most popular **Linux distributions**
@@ -52,6 +52,7 @@ Documents
 -   [Vagrant install](docs/vagrant.md)
 -   [CoreOS bootstrap](docs/coreos.md)
 -   [Debian Jessie setup](docs/debian.md)
+-   [openSUSE setup](docs/opensuse.md)
 -   [Downloaded artifacts](docs/downloads.md)
 -   [Cloud providers](docs/cloud.md)
 -   [OpenStack](docs/openstack.md)
@@ -66,10 +67,11 @@ Supported Linux Distributions
 -----------------------------

 -   **Container Linux by CoreOS**
--   **Debian** Jessie
+-   **Debian** Jessie, Stretch, Wheezy
 -   **Ubuntu** 16.04
 -   **CentOS/RHEL** 7
 -   **Fedora/CentOS** Atomic
+-   **openSUSE** Leap 42.3/Tumbleweed

 Note: Upstart/SysV init based OS types are not supported.
@@ -83,7 +85,7 @@ Versions of supported components
 -   [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
 -   [cilium](https://github.com/cilium/cilium) v1.0.0-rc8
 -   [contiv](https://github.com/contiv/install/releases) v1.1.7
--   [weave](http://weave.works/) v2.2.1
+-   [weave](http://weave.works/) v2.3.0
 -   [docker](https://www.docker.com/) v17.03 (see note)
 -   [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
@@ -105,6 +107,9 @@ Requirements
 -   **Your ssh key must be copied** to all the servers part of your inventory.
 -   The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
     In order to avoid any issue during deployment you should disable your firewall.
+-   If kubespray is run from a non-root user account, a suitable privilege escalation method
+    should be configured on the target servers. Then the `ansible_become` flag
+    or the command parameters `--become` or `-b` should be specified.

 Network Plugins
 ---------------
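
For example, a non-root deployment could be launched like this (a sketch; the `deploy` user and the sample inventory path are illustrative):

```
ansible-playbook -i inventory/sample/hosts.ini cluster.yml -u deploy -b --become-user=root
```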

Vagrantfile

@@ -18,6 +18,8 @@ SUPPORTED_OS = {
   "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
   "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
   "centos" => {box: "centos/7", bootstrap_os: "centos", user: "vagrant"},
+  "opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", bootstrap_os: "opensuse", user: "vagrant"},
+  "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", bootstrap_os: "opensuse", user: "vagrant"},
 }
# Defaults for config options defined in CONFIG # Defaults for config options defined in CONFIG
@@ -52,7 +54,7 @@ end
 $box = SUPPORTED_OS[$os][:box]
 # if $inventory is not set, try to use example
-$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory
+$inventory = File.join(File.dirname(__FILE__), "inventory", "sample") if ! $inventory

 # if $inventory has a hosts file use it, otherwise copy over vars etc
 # to where vagrant expects dynamic inventory to be.
@@ -84,7 +86,6 @@ Vagrant.configure("2") do |config|
   if Vagrant.has_plugin?("vagrant-vbguest") then
     config.vbguest.auto_update = false
   end
-
   (1..$num_instances).each do |i|
     config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
       config.vm.hostname = vm_name
@@ -110,8 +111,10 @@ Vagrant.configure("2") do |config|
         end
       end

+      config.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
+
       $shared_folders.each do |src, dst|
-        config.vm.synced_folder src, dst
+        config.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
       end

       config.vm.provider :virtualbox do |vb|


@@ -13,4 +13,3 @@ callback_whitelist = profile_tasks
 roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
 deprecation_warnings=False
 inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
-jinja2_extensions = jinja2.ext.do


@@ -33,7 +33,7 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes/preinstall, tags: preinstall }
-    - { role: docker, tags: docker }
+    - { role: docker, tags: docker, when: manage_docker|default(true) }
     - role: rkt
       tags: rkt
       when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
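
With the `manage_docker` switch introduced above, the docker role can be skipped entirely on hosts where Docker is already provisioned; a minimal sketch (flag name per the `when:` clause above, everything else default):

```
ansible-playbook -i inventory/sample/hosts.ini cluster.yml -b -e manage_docker=false
```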


@@ -46,7 +46,7 @@ ssh -F ./ssh-bastion.conf user@$ip

 Example (this one assumes you are using CoreOS)
 ```commandline
-ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
+ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
 ```
 ***Using another distribution than CoreOS***
 If you want to use another distribution than CoreOS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.


@@ -1 +1 @@
-../../inventory/group_vars
+../../inventory/local/group_vars


@@ -135,7 +135,7 @@ the one you want to use with the environment variable `OS_CLOUD`:
 export OS_CLOUD=mycloud
 ```

-##### Openrc method (deprecated)
+##### Openrc method

 When using classic environment variables, Terraform uses default `OS_*`
 environment variables. A script suitable for your environment may be available
@@ -218,6 +218,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
 |`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
 |`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
 | `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
+|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube-node` for tainting them as nodes. Empty by default. |
#### Terraform state files #### Terraform state files
@@ -299,11 +300,15 @@ If you have deployed and destroyed a previous iteration of your cluster, you wil

 #### Bastion host

-If you are not using a bastion host, but not all of your nodes have floating IPs, create a file `inventory/$CLUSTER/group_vars/no-floating.yml` with the following content. Use one of your nodes with a floating IP (this should have been output at the end of the Terraform step) and the appropriate user for that OS, or if you have another jump host, use that.
-```
-ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@MASTER_IP"'
-```
+Bastion access will be determined by:
+
+- Your choice on the amount of bastion hosts (set by the `number_of_bastions` terraform variable).
+- The existence of nodes/masters with floating IPs (set by the `number_of_k8s_masters`, `number_of_k8s_nodes`, `number_of_k8s_masters_no_etcd` terraform variables).
+
+If you have a bastion host, your ssh traffic will be directly routed through it. This is regardless of whether you have masters/nodes with a floating IP assigned.
+If you don't have a bastion host, but at least one of your masters/nodes has a floating IP, then ssh traffic will be tunneled by one of these machines.
+So, either a bastion host, or at least one master/node with a floating IP, is required.
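
For reference, the `no-floating.yml` that the Terraform `local-exec` provisioners generate from `ansible_bastion_template.txt` (see the compute module further down) looks like the removed manual example above, with `USER` and `BASTION_ADDRESS` substituted; the user and address here are illustrative:

```
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q core@10.0.0.1"'
```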
 #### Test access


@@ -48,6 +48,7 @@ module "compute" {
   k8s_master_fips = "${module.ips.k8s_master_fips}"
   k8s_node_fips = "${module.ips.k8s_node_fips}"
   bastion_fips = "${module.ips.bastion_fips}"
+  supplementary_master_groups = "${var.supplementary_master_groups}"
   network_id = "${module.network.router_id}"
 }


@@ -83,7 +83,7 @@ resource "openstack_compute_instance_v2" "bastion" {
   }

   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/group_vars/no-floating.yml"
   }
 }
@@ -107,10 +107,14 @@ resource "openstack_compute_instance_v2" "k8s_master" {
   metadata = {
     ssh_user = "${var.ssh_user}"
-    kubespray_groups = "etcd,kube-master,k8s-cluster,vault"
+    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
     depends_on = "${var.network_id}"
   }

+  provisioner "local-exec" {
+    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+  }
+
 }
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" { resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
@ -125,15 +129,20 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
} }
security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}", security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
"${openstack_compute_secgroup_v2.bastion.name}",
"${openstack_compute_secgroup_v2.k8s.name}", "${openstack_compute_secgroup_v2.k8s.name}",
] ]
metadata = { metadata = {
ssh_user = "${var.ssh_user}" ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-master,k8s-cluster,vault" kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
depends_on = "${var.network_id}" depends_on = "${var.network_id}"
} }
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
}
} }
resource "openstack_compute_instance_v2" "etcd" { resource "openstack_compute_instance_v2" "etcd" {
@@ -175,7 +184,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
   metadata = {
     ssh_user = "${var.ssh_user}"
-    kubespray_groups = "etcd,kube-master,k8s-cluster,vault,no-floating"
+    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
     depends_on = "${var.network_id}"
   }
@@ -198,7 +207,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
   metadata = {
     ssh_user = "${var.ssh_user}"
-    kubespray_groups = "kube-master,k8s-cluster,vault,no-floating"
+    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
     depends_on = "${var.network_id}"
   }
@@ -226,6 +235,10 @@ resource "openstack_compute_instance_v2" "k8s_node" {
     depends_on = "${var.network_id}"
   }

+  provisioner "local-exec" {
+    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+  }
+
 }

 resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {


@@ -55,3 +55,7 @@ variable "k8s_node_fips" {
 variable "bastion_fips" {
   type = "list"
 }
+
+variable "supplementary_master_groups" {
+  default = ""
+}


@@ -111,3 +111,8 @@ variable "floatingip_pool" {
 variable "external_net" {
   description = "uuid of the external/public network"
 }
+
+variable "supplementary_master_groups" {
+  description = "supplementary kubespray ansible groups for masters, such as kube-node"
+  default = ""
+}
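
A minimal sketch of how this could be set in `inventory/$CLUSTER/cluster.tf` (the value is illustrative; any comma-separated list of extra groups should work the same way):

```
supplementary_master_groups = "kube-node"
```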


@@ -169,3 +169,12 @@ By default the felix agent(calico-node) will abort if the Kernel RPF setting is
 ```
 calico_node_ignorelooserpf: true
 ```
+
+Note that in OpenStack you must allow `ipip` traffic in your security groups,
+otherwise you will experience timeouts.
+To do this you must add a rule which allows it, for example:
+
+```
+neutron security-group-rule-create --protocol 4 --direction egress k8s-a0tp4t
+neutron security-group-rule-create --protocol 4 --direction ingress k8s-a0tp4t
+```

Binary file not shown (new image, 6.8 KiB).


@@ -25,8 +25,8 @@ There are related application specific variables:
 netchecker_port: 31081
 agent_report_interval: 15
 netcheck_namespace: default
-agent_img: "quay.io/l23network/k8s-netchecker-agent:v1.0"
-server_img: "quay.io/l23network/k8s-netchecker-server:v1.0"
+agent_img: "mirantis/k8s-netchecker-agent:v1.2.2"
+server_img: "mirantis/k8s-netchecker-server:v1.2.2"
 ```

 Note that the application verifies DNS resolve for FQDNs comprising only the

docs/opensuse.md (new file)

@@ -0,0 +1,19 @@
+openSUSE Leap 42.3 and Tumbleweed
+=================================
+
+openSUSE Leap installation notes:
+
+- Install Ansible
+
+  ```
+  sudo zypper ref
+  sudo zypper -n install ansible
+  ```
+
+- Install Jinja2 and Python-Netaddr
+
+  ```
+  sudo zypper -n install python-Jinja2 python-netaddr
+  ```
+
+Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
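
With the packages above installed, a deployment should then proceed as on other distributions; a sketch, assuming an inventory has already been prepared:

```
ansible-playbook -i inventory/sample/hosts.ini cluster.yml -b -e bootstrap_os=opensuse
```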


@@ -8,8 +8,8 @@
     version: "{{ item.version }}"
     state: "{{ item.state }}"
   with_items:
-    - { state: "present", name: "docker", version: "2.7.0" }
-    - { state: "present", name: "docker-compose", version: "1.18.0" }
+    - { state: "present", name: "docker", version: "3.2.1" }
+    - { state: "present", name: "docker-compose", version: "1.21.0" }

 - name: CephFS Provisioner | Check Go version
   shell: |
@@ -36,18 +36,18 @@
   git:
     repo: https://github.com/kubernetes-incubator/external-storage.git
     dest: "~/go/src/github.com/kubernetes-incubator"
-    version: 92295a30
+    version: a71a49d4
     clone: no
     update: yes

 - name: CephFS Provisioner | Build image
   shell: |
     cd ~/go/src/github.com/kubernetes-incubator/external-storage
-    REGISTRY=quay.io/kubespray/ VERSION=92295a30 make ceph/cephfs
+    REGISTRY=quay.io/kubespray/ VERSION=a71a49d4 make ceph/cephfs

 - name: CephFS Provisioner | Push image
   docker_image:
-    name: quay.io/kubespray/cephfs-provisioner:92295a30
+    name: quay.io/kubespray/cephfs-provisioner:a71a49d4
     push: yes
     retries: 10


@@ -1,8 +1,8 @@
 # Kubernetes configuration dirs and system namespace.
 # Those are where all the additional config stuff goes
-# the kubernetes normally puts in /srv/kubernets.
+# the kubernetes normally puts in /srv/kubernetes.
 # This puts them in a sane location and namespace.
-# Editting those values will almost surely break something.
+# Editing those values will almost surely break something.
 kube_config_dir: /etc/kubernetes
 kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
@@ -28,7 +28,7 @@ local_release_dir: "/tmp/releases"
 retry_stagger: 5

 # This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
+# cert files to. Not really changeable...
 kube_cert_group: kube-cert

 # Cluster Loglevel configuration
@@ -58,7 +58,9 @@ kube_users:
 ## Optional settings for OIDC
 # kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
 # kube_oidc_username_claim: sub
+# kube_oidc_username_prefix: oidc:
 # kube_oidc_groups_claim: groups
+# kube_oidc_groups_prefix: oidc:

 # Choose network plugin (cilium, calico, contiv, weave or flannel)
@@ -162,15 +164,9 @@ dashboard_enabled: true
 # Monitoring apps for k8s
 efk_enabled: false

-# Helm deployment. Needs for Prometheus Operator, k8s metrics.
+# Helm deployment
 helm_enabled: false

-# Prometheus Operator. Needs for k8s metrics. Installed Helm is required.
-prometheus_operator_enabled: false
-
-# K8s cluster metrics. Installed Helm and Prometheus Operator are required.
-k8s_metrics_enabled: false
-
 # Istio deployment
 istio_enabled: false


@@ -1,14 +1,14 @@
 # ## Configure 'ip' variable to bind kubernetes services on a
 # ## different ip than the default iface
-# node1 ansible_ssh_host=95.54.0.12  # ip=10.3.0.1
-# node2 ansible_ssh_host=95.54.0.13  # ip=10.3.0.2
-# node3 ansible_ssh_host=95.54.0.14  # ip=10.3.0.3
-# node4 ansible_ssh_host=95.54.0.15  # ip=10.3.0.4
-# node5 ansible_ssh_host=95.54.0.16  # ip=10.3.0.5
-# node6 ansible_ssh_host=95.54.0.17  # ip=10.3.0.6
+# node1 ansible_host=95.54.0.12  # ip=10.3.0.1
+# node2 ansible_host=95.54.0.13  # ip=10.3.0.2
+# node3 ansible_host=95.54.0.14  # ip=10.3.0.3
+# node4 ansible_host=95.54.0.15  # ip=10.3.0.4
+# node5 ansible_host=95.54.0.16  # ip=10.3.0.5
+# node6 ansible_host=95.54.0.17  # ip=10.3.0.6

 # ## configure a bastion host if your nodes are not directly reachable
-# bastion ansible_ssh_host=x.x.x.x
+# bastion ansible_host=x.x.x.x ansible_user=some_user

 # [kube-master]
 # node1


@@ -22,7 +22,6 @@
   failed_when: false
   changed_when: false
   check_mode: no
-  when: need_bootstrap.rc != 0
   tags:
     - facts

@@ -30,24 +29,24 @@
   copy:
     src: get-pip.py
     dest: ~/get-pip.py
-  when: need_pip != 0
+  when: need_pip.rc != 0

 - name: Bootstrap | Install pip
   shell: "{{ansible_python_interpreter}} ~/get-pip.py"
-  when: need_pip != 0
+  when: need_pip.rc != 0

 - name: Bootstrap | Remove get-pip.py
   file:
     path: ~/get-pip.py
     state: absent
-  when: need_pip != 0
+  when: need_pip.rc != 0

 - name: Bootstrap | Install pip launcher
   copy:
     src: runner
     dest: /opt/bin/pip
     mode: 0755
-  when: need_pip != 0
+  when: need_pip.rc != 0

 - name: Install required python modules
   pip:


@@ -0,0 +1,7 @@
+---
+- name: Install required packages (SUSE)
+  package:
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - python-cryptography


@@ -11,6 +11,9 @@
 - import_tasks: bootstrap-centos.yml
   when: bootstrap_os == "centos"

+- import_tasks: bootstrap-opensuse.yml
+  when: bootstrap_os == "opensuse"
+
 - import_tasks: setup-pipelining.yml

 - name: check if atomic host
@@ -26,18 +29,25 @@
     gather_subset: '!all'
     filter: ansible_*

-- name: Assign inventory name to unconfigured hostnames (non-CoreOS)
+- name: Assign inventory name to unconfigured hostnames (non-CoreOS and Tumbleweed)
   hostname:
     name: "{{inventory_hostname}}"
-  when: ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS'] and override_system_hostname
+  when:
+    - override_system_hostname
+    - ansible_distribution not in ['openSUSE Tumbleweed']
+    - ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS']

-- name: Assign inventory name to unconfigured hostnames (CoreOS only)
+- name: Assign inventory name to unconfigured hostnames (CoreOS and Tumbleweed only)
   command: "hostnamectl set-hostname {{inventory_hostname}}"
   register: hostname_changed
-  when: ansible_hostname == 'localhost' and ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and override_system_hostname
+  when:
+    - ansible_hostname == 'localhost'
+    - ansible_distribution in ['openSUSE Tumbleweed'] or ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
+    - override_system_hostname

-- name: Update hostname fact (CoreOS only)
+- name: Update hostname fact (CoreOS and Tumbleweed only)
   setup:
     gather_subset: '!all'
     filter: ansible_hostname
-  when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and hostname_changed.changed
+  when:
+    - hostname_changed.changed


@@ -15,6 +15,14 @@
   tags:
     - facts

+# https://yum.dockerproject.org/repo/main/opensuse/ contains packages for an EOL
+# openSUSE version so we can't use it. The only alternative is to use the docker
+# packages from the distribution repositories.
+- name: Warn about Docker version on SUSE
+  debug:
+    msg: "SUSE distributions always install Docker from the distro repos"
+  when: ansible_pkg_mgr == 'zypper'
+
 - include_tasks: set_facts_dns.yml
   when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
   tags:
@@ -43,7 +51,7 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{ docker_repo_key_info.repo_keys }}"
-  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic)
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic)

 - name: ensure docker-ce repository is enabled
   action: "{{ docker_repo_info.pkg_repo }}"
@@ -51,7 +59,7 @@
     repo: "{{item}}"
     state: present
   with_items: "{{ docker_repo_info.repos }}"
-  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (docker_repo_info.repos|length > 0)
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (docker_repo_info.repos|length > 0)

 - name: ensure docker-engine repository public key is installed
   action: "{{ dockerproject_repo_key_info.pkg_key }}"
@@ -64,7 +72,7 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{ dockerproject_repo_key_info.repo_keys }}"
-  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic)
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic)

 - name: ensure docker-engine repository is enabled
   action: "{{ dockerproject_repo_info.pkg_repo }}"
@@ -72,7 +80,7 @@
     repo: "{{item}}"
     state: present
   with_items: "{{ dockerproject_repo_info.repos }}"
-  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (dockerproject_repo_info.repos|length > 0)
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (dockerproject_repo_info.repos|length > 0)

 - name: Configure docker repository on RedHat/CentOS
   template:
@@ -110,6 +118,12 @@
   notify: restart docker
   when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_package_info.pkgs|length > 0)

+- name: ensure service is started if docker packages are already present
+  service:
+    name: docker
+    state: started
+  when: docker_task_result is not changed
+
 - name: flush handlers so we can wait for docker to come up
   meta: flush_handlers


@@ -6,7 +6,9 @@
   with_items:
     - docker
     - docker-engine
-  when: ansible_os_family == 'Debian' and (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+  when:
+    - ansible_os_family == 'Debian'
+    - (docker_versioned_pkg[docker_version | string] | search('docker-ce'))

 - name: Ensure old versions of Docker are not installed. | RedHat
   package:
@@ -17,4 +19,7 @@
     - docker-common
     - docker-engine
     - docker-selinux
-  when: ansible_os_family == 'RedHat' and (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+  when:
+    - ansible_os_family == 'RedHat'
+    - (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+    - not is_atomic


@@ -7,6 +7,9 @@ Wants=docker-storage-setup.service
 {% elif ansible_os_family == "Debian" %}
 After=network.target docker.socket
 Wants=docker.socket
+{% elif ansible_os_family == "Suse" %}
+After=network.target containerd.socket containerd.service
+Requires=containerd.socket containerd.service
 {% endif %}

 [Service]
@@ -19,6 +22,9 @@ ExecReload=/bin/kill -s HUP $MAINPID
 Delegate=yes
 KillMode=process
 ExecStart={{ docker_bin_dir }}/docker{% if installed_docker_version.stdout|version_compare('17.03', '<') %} daemon{% else %}d{% endif %} \
+{% if ansible_os_family == "Suse" %}
+          --containerd /run/containerd/containerd.sock --add-runtime oci=/usr/bin/docker-runc \
+{% endif %}
           $DOCKER_OPTS \
           $DOCKER_STORAGE_OPTIONS \
           $DOCKER_NETWORK_OPTIONS \


@@ -0,0 +1,15 @@
+---
+docker_kernel_min_version: '0'
+
+docker_package_info:
+  pkg_mgr: zypper
+  pkgs:
+    - name: docker
+
+docker_repo_key_info:
+  pkg_key: ''
+  repo_keys: []
+
+docker_repo_info:
+  pkg_repo: ''
+  repos: []


@@ -38,7 +38,7 @@ flannel_version: "v0.10.0"
 flannel_cni_version: "v0.3.0"
 istio_version: "0.2.6"
 vault_version: 0.8.1
-weave_version: 2.2.1
+weave_version: 2.3.0
 pod_infra_version: 3.0
 contiv_version: 1.1.7
 cilium_version: "v1.0.0-rc8"
@@ -70,16 +70,32 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
 calico_policy_image_tag: "{{ calico_policy_version }}"
 calico_rr_image_repo: "quay.io/calico/routereflector"
 calico_rr_image_tag: "{{ calico_rr_version }}"
+istio_proxy_image_repo: docker.io/istio/proxy
+istio_proxy_image_tag: "{{ istio_version }}"
+istio_proxy_init_image_repo: docker.io/istio/proxy_init
+istio_proxy_init_image_tag: "{{ istio_version }}"
+istio_ca_image_repo: docker.io/istio/istio-ca
+istio_ca_image_tag: "{{ istio_version }}"
+istio_mixer_image_repo: docker.io/istio/mixer
+istio_mixer_image_tag: "{{ istio_version }}"
+istio_pilot_image_repo: docker.io/istio/pilot
+istio_pilot_image_tag: "{{ istio_version }}"
+istio_proxy_debug_image_repo: docker.io/istio/proxy_debug
+istio_proxy_debug_image_tag: "{{ istio_version }}"
+istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer
+istio_sidecar_initializer_image_tag: "{{ istio_version }}"
+istio_statsd_image_repo: prom/statsd-exporter
+istio_statsd_image_tag: latest
 hyperkube_image_repo: "gcr.io/google-containers/hyperkube"
 hyperkube_image_tag: "{{ kube_version }}"
 pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
 pod_infra_image_tag: "{{ pod_infra_version }}"
 install_socat_image_repo: "xueshanf/install-socat"
 install_socat_image_tag: "latest"
-netcheck_version: "v1.0"
-netcheck_agent_img_repo: "quay.io/l23network/k8s-netchecker-agent"
+netcheck_version: "v1.2.2"
+netcheck_agent_img_repo: "mirantis/k8s-netchecker-agent"
 netcheck_agent_tag: "{{ netcheck_version }}"
-netcheck_server_img_repo: "quay.io/l23network/k8s-netchecker-server"
+netcheck_server_img_repo: "mirantis/k8s-netchecker-server"
 netcheck_server_tag: "{{ netcheck_version }}"
 weave_kube_image_repo: "weaveworks/weave-kube"
 weave_kube_image_tag: "{{ weave_version }}"
@@ -134,13 +150,15 @@ registry_image_repo: "registry"
 registry_image_tag: "2.6"
 registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy"
 registry_proxy_image_tag: "0.4"
+local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner"
+local_volume_provisioner_image_tag: "v2.0.0"
 cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner"
-cephfs_provisioner_image_tag: "92295a30"
+cephfs_provisioner_image_tag: "a71a49d4"
 ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller"
-ingress_nginx_controller_image_tag: "0.11.0"
+ingress_nginx_controller_image_tag: "0.14.0"
 ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
 ingress_nginx_default_backend_image_tag: "1.4"
-cert_manager_version: "v0.2.3"
+cert_manager_version: "v0.2.4"
 cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
 cert_manager_controller_image_tag: "{{ cert_manager_version }}"
 cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
@@ -197,6 +215,70 @@ downloads:
       mode: "0755"
     groups:
       - kube-master
+  istio_proxy:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_proxy_image_repo }}"
+    tag: "{{ istio_proxy_image_tag }}"
+    sha256: "{{ istio_proxy_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_proxy_init:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_proxy_init_image_repo }}"
+    tag: "{{ istio_proxy_init_image_tag }}"
+    sha256: "{{ istio_proxy_init_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_ca:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_ca_image_repo }}"
+    tag: "{{ istio_ca_image_tag }}"
+    sha256: "{{ istio_ca_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_mixer:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_mixer_image_repo }}"
+    tag: "{{ istio_mixer_image_tag }}"
+    sha256: "{{ istio_mixer_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_pilot:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_pilot_image_repo }}"
+    tag: "{{ istio_pilot_image_tag }}"
+    sha256: "{{ istio_pilot_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_proxy_debug:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_proxy_debug_image_repo }}"
+    tag: "{{ istio_proxy_debug_image_tag }}"
+    sha256: "{{ istio_proxy_debug_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_sidecar_initializer:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_sidecar_initializer_image_repo }}"
+    tag: "{{ istio_sidecar_initializer_image_tag }}"
+    sha256: "{{ istio_sidecar_initializer_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_statsd:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_statsd_image_repo }}"
+    tag: "{{ istio_statsd_image_tag }}"
+    sha256: "{{ istio_statsd_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   hyperkube:
     enabled: true
     container: true
@@ -451,6 +533,14 @@ downloads:
       sha256: "{{ registry_proxy_digest_checksum|default(None) }}"
     groups:
       - kube-node
+  local_volume_provisioner:
+    enabled: "{{ local_volume_provisioner_enabled }}"
+    container: true
+    repo: "{{ local_volume_provisioner_image_repo }}"
+    tag: "{{ local_volume_provisioner_image_tag }}"
+    sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   cephfs_provisioner:
     enabled: "{{ cephfs_provisioner_enabled }}"
     container: true


@@ -2,12 +2,11 @@
 - name: container_download | Make download decision if pull is required by tag or sha256
   include_tasks: set_docker_image_facts.yml
   delegate_to: "{{ download_delegate if download_run_once or omit }}"
-  delegate_facts: no
+  delegate_facts: yes
   run_once: "{{ download_run_once }}"
   when:
     - download.enabled
     - download.container
-    - group_names | intersect(download.groups) | length
   tags:
     - facts

@@ -24,7 +23,6 @@
     - download.enabled
     - download.container
     - pull_required|default(download_always_pull)
-    - group_names | intersect(download.groups) | length
   delegate_to: "{{ download_delegate }}"
   delegate_facts: yes
   run_once: yes


@@ -22,3 +22,4 @@
     - item.value.enabled
     - item.value.container
     - download_run_once
+    - group_names | intersect(download.groups) | length


@@ -7,7 +7,6 @@
   when:
     - download.enabled
     - download.container
-    - group_names | intersect(download.groups) | length
   tags:
     - facts

@@ -18,7 +17,7 @@
     - download.enabled
     - download.container
     - download_run_once
-    - group_names | intersect(download.groups) | length
   tags:
     - facts

@@ -29,7 +28,6 @@
     - download.enabled
     - download.container
     - download_run_once
-    - group_names | intersect(download.groups) | length

 - name: "container_download | Update the 'container_changed' fact"
   set_fact:
@@ -39,14 +37,13 @@
     - download.container
     - download_run_once
     - pull_required|default(download_always_pull)
-    - group_names | intersect(download.groups) | length
   run_once: "{{ download_run_once }}"
   tags:
     - facts

 - name: container_download | Stat saved container image
   stat:
-    path: "{{fname}}"
+    path: "{{ fname }}"
   register: img
   changed_when: false
   delegate_to: "{{ download_delegate }}"
@@ -57,7 +54,6 @@
     - download.enabled
     - download.container
     - download_run_once
-    - group_names | intersect(download.groups) | length
   tags:
     - facts

@@ -73,7 +69,6 @@
     - download_run_once
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost")
     - (container_changed or not img.stat.exists)
-    - group_names | intersect(download.groups) | length

 - name: container_download | copy container images to ansible host
   synchronize:
@@ -93,7 +88,6 @@
     - inventory_hostname == download_delegate
     - download_delegate != "localhost"
    - saved.changed
-    - group_names | intersect(download.groups) | length

 - name: container_download | upload container images to nodes
   synchronize:
@@ -115,7 +109,6 @@
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and
        inventory_hostname != download_delegate or
        download_delegate == "localhost")
-    - group_names | intersect(download.groups) | length
   tags:
     - upload
     - upgrade
@@ -128,7 +121,6 @@
     - download_run_once
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and
        inventory_hostname != download_delegate or download_delegate == "localhost")
-    - group_names | intersect(download.groups) | length
   tags:
     - upload
     - upgrade


@@ -32,6 +32,12 @@ etcd_election_timeout: "5000"
 etcd_metrics: "basic"

+## A dictionary of extra environment variables to add to etcd.env, formatted like:
+##  etcd_extra_vars:
+##    ETCD_VAR1: "value1"
+##    ETCD_VAR2: "value2"
+etcd_extra_vars: {}
+
 # Limits
 # Limit memory only if <4GB memory on host. 0=unlimited
 etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif %}"
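
As the `etcd.env` template change below shows, each key/value pair is rendered verbatim into `etcd.env`. For instance, this (illustrative) setting:

```
etcd_extra_vars:
  ETCD_AUTO_COMPACTION_RETENTION: "1"
```

would append `ETCD_AUTO_COMPACTION_RETENTION=1` to the generated `etcd.env`.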


@@ -8,6 +8,8 @@
     /etc/pki/ca-trust/source/anchors/etcd-ca.crt
   {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
     /etc/ssl/certs/etcd-ca.pem
+  {%- elif ansible_os_family == "Suse" -%}
+    /etc/pki/trust/anchors/etcd-ca.pem
   {%- endif %}
   tags:
     - facts
@@ -19,9 +21,9 @@
     remote_src: true
   register: etcd_ca_cert

-- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS)
+- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS)
   command: update-ca-certificates
-  when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"]
+  when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"]

 - name: Gen_certs | update ca-certificates (RedHat)
   command: update-ca-trust extract


@@ -27,3 +27,7 @@ ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
 ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem
 ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem
 ETCD_PEER_CLIENT_CERT_AUTH={{ etcd_peer_client_auth }}
+
+{% for key, value in etcd_extra_vars.iteritems() %}
+{{ key }}={{ value }}
+{% endfor %}


@@ -7,3 +7,6 @@ rules:
   - apiGroups: [""]
     resources: ["pods"]
     verbs: ["list"]
+  - apiGroups: ["apiextensions.k8s.io"]
+    resources: ["customresourcedefinitions"]
+    verbs: ['*']


@@ -104,6 +104,7 @@
     - rbac_enabled
     - cloud_provider is defined
     - cloud_provider == 'vsphere'
+    - vsphere_cloud_provider.rc is defined
     - vsphere_cloud_provider.rc != 0
     - kube_version | version_compare('v1.9.0', '>=')
     - kube_version | version_compare('v1.9.3', '<=')
@@ -121,6 +122,7 @@
     - rbac_enabled
     - cloud_provider is defined
     - cloud_provider == 'vsphere'
+    - vsphere_cloud_provider.rc is defined
     - vsphere_cloud_provider.rc != 0
     - kube_version | version_compare('v1.9.0', '>=')
     - kube_version | version_compare('v1.9.3', '<=')


@@ -30,12 +30,12 @@ spec:
           limits:
             cpu: {{ elasticsearch_cpu_limit }}
 {% if elasticsearch_mem_limit is defined and elasticsearch_mem_limit != "0M" %}
-            mem: {{ elasticsearch_mem_limit }}
+            memory: "{{ elasticsearch_mem_limit }}"
 {% endif %}
           requests:
             cpu: {{ elasticsearch_cpu_requests }}
 {% if elasticsearch_mem_requests is defined and elasticsearch_mem_requests != "0M" %}
-            mem: {{ elasticsearch_mem_requests }}
+            memory: "{{ elasticsearch_mem_requests }}"
 {% endif %}
         ports:
           - containerPort: 9200


@@ -26,12 +26,12 @@ spec:
           limits:
             cpu: {{ kibana_cpu_limit }}
 {% if kibana_mem_limit is defined and kibana_mem_limit != "0M" %}
-            mem: {{ kibana_mem_limit }}
+            memory: "{{ kibana_mem_limit }}"
 {% endif %}
           requests:
             cpu: {{ kibana_cpu_requests }}
 {% if kibana_mem_requests is defined and kibana_mem_requests != "0M" %}
-            mem: {{ kibana_mem_requests }}
+            memory: "{{ kibana_mem_requests }}"
 {% endif %}
         env:
           - name: "ELASTICSEARCH_URL"


@@ -1,7 +1,4 @@
 ---
-local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner
-local_volume_provisioner_image_tag: v2.0.0
 local_volume_provisioner_namespace: "kube-system"
 local_volume_provisioner_base_dir: /mnt/disks
 local_volume_provisioner_mount_dir: /mnt/disks

View file

@@ -5,7 +5,7 @@ metadata:
name: certificates.certmanager.k8s.io name: certificates.certmanager.k8s.io
labels: labels:
app: cert-manager app: cert-manager
chart: cert-manager-0.2.5 chart: cert-manager-0.2.8
release: cert-manager release: cert-manager
heritage: Tiller heritage: Tiller
spec: spec:

View file

@@ -5,7 +5,7 @@ metadata:
name: clusterissuers.certmanager.k8s.io name: clusterissuers.certmanager.k8s.io
labels: labels:
app: cert-manager app: cert-manager
chart: cert-manager-0.2.5 chart: cert-manager-0.2.8
release: cert-manager release: cert-manager
heritage: Tiller heritage: Tiller
spec: spec:

View file

@@ -5,7 +5,7 @@ metadata:
name: cert-manager name: cert-manager
labels: labels:
app: cert-manager app: cert-manager
chart: cert-manager-0.2.5 chart: cert-manager-0.2.8
release: cert-manager release: cert-manager
heritage: Tiller heritage: Tiller
rules: rules:

View file

@@ -5,7 +5,7 @@ metadata:
name: cert-manager name: cert-manager
labels: labels:
app: cert-manager app: cert-manager
chart: cert-manager-0.2.5 chart: cert-manager-0.2.8
release: cert-manager release: cert-manager
heritage: Tiller heritage: Tiller
roleRef: roleRef:

View file

@@ -6,7 +6,7 @@ metadata:
namespace: {{ cert_manager_namespace }} namespace: {{ cert_manager_namespace }}
labels: labels:
app: cert-manager app: cert-manager
chart: cert-manager-0.2.5 chart: cert-manager-0.2.8
release: cert-manager release: cert-manager
heritage: Tiller heritage: Tiller
spec: spec:

View file

@@ -5,7 +5,7 @@ metadata:
name: issuers.certmanager.k8s.io name: issuers.certmanager.k8s.io
labels: labels:
app: cert-manager app: cert-manager
chart: cert-manager-0.2.5 chart: cert-manager-0.2.8
release: cert-manager release: cert-manager
heritage: Tiller heritage: Tiller
spec: spec:

View file

@@ -6,6 +6,6 @@ metadata:
namespace: {{ cert_manager_namespace }} namespace: {{ cert_manager_namespace }}
labels: labels:
app: cert-manager app: cert-manager
chart: cert-manager-0.2.5 chart: cert-manager-0.2.8
release: cert-manager release: cert-manager
heritage: Tiller heritage: Tiller

View file

@@ -1,32 +1,2 @@
--- ---
istio_enabled: false
istio_namespace: istio-system istio_namespace: istio-system
istio_version: "0.2.6"
istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux"
istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370
istio_proxy_image_repo: docker.io/istio/proxy
istio_proxy_image_tag: "{{ istio_version }}"
istio_proxy_init_image_repo: docker.io/istio/proxy_init
istio_proxy_init_image_tag: "{{ istio_version }}"
istio_ca_image_repo: docker.io/istio/istio-ca
istio_ca_image_tag: "{{ istio_version }}"
istio_mixer_image_repo: docker.io/istio/mixer
istio_mixer_image_tag: "{{ istio_version }}"
istio_pilot_image_repo: docker.io/istio/pilot
istio_pilot_image_tag: "{{ istio_version }}"
istio_proxy_debug_image_repo: docker.io/istio/proxy_debug
istio_proxy_debug_image_tag: "{{ istio_version }}"
istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer
istio_sidecar_initializer_image_tag: "{{ istio_version }}"
istio_statsd_image_repo: prom/statsd-exporter
istio_statsd_image_tag: latest

View file

@@ -27,12 +27,6 @@ dependencies:
- apps - apps
- registry - registry
- role: kubernetes-apps/metrics
when: prometheus_operator_enabled
tags:
- apps
- metrics
# istio role should be last because it takes a long time to initialize and # istio role should be last because it takes a long time to initialize and
# will cause timeouts trying to start other addons. # will cause timeouts trying to start other addons.
- role: kubernetes-apps/istio - role: kubernetes-apps/istio

View file

@@ -1,9 +0,0 @@
---
# Prometheus Operator. Needs for k8s metrics. Installed Helm is required.
prometheus_operator_enabled: false
# K8s cluster metrics. Installed Helm and Prometheus Operators are required.
k8s_metrics_enabled: false
# Separate namespace for monitoring/metrics
monitoring_namespace: "monitoring"

View file

@@ -1,32 +0,0 @@
---
- name: Metrics | Make sure Helm is installed
command: "{{ bin_dir }}/helm version"
register: helm_ready_result
until: helm_ready_result|succeeded
retries: 4
delay: 5
when:
- prometheus_operator_enabled
- inventory_hostname == groups['kube-master'][0]
- name: Metrics | Add coreos repo
command: "{{ bin_dir }}/helm repo add coreos https://s3-eu-west-1.amazonaws.com/coreos-charts/stable/"
when:
- prometheus_operator_enabled
- inventory_hostname == groups['kube-master'][0]
run_once: true
- name: Metrics | Install Prometheus Operator
command: "{{ bin_dir }}/helm upgrade --install prometheus-operator coreos/prometheus-operator --namespace {{ monitoring_namespace }}"
when:
- prometheus_operator_enabled
- inventory_hostname == groups['kube-master'][0]
run_once: true
- name: Metrics | Install K8s cluster metrics
command: "{{ bin_dir }}/helm upgrade --install kube-prometheus coreos/kube-prometheus --namespace {{ monitoring_namespace }}"
when:
- prometheus_operator_enabled
- k8s_metrics_enabled
- inventory_hostname == groups['kube-master'][0]
run_once: true

View file

@@ -34,7 +34,7 @@
{{ bin_dir }}/kubectl get secrets --all-namespaces {{ bin_dir }}/kubectl get secrets --all-namespaces
-o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}' -o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}'
| grep kubernetes.io/service-account-token | grep kubernetes.io/service-account-token
| egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|efk|tiller' | egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|efk|tiller|local-volume-provisioner'
register: tokens_to_delete register: tokens_to_delete
when: needs_rotation when: needs_rotation

View file

@@ -55,7 +55,7 @@
- name: Copy kubectl binary to ansible host - name: Copy kubectl binary to ansible host
fetch: fetch:
src: "{{ bin_dir }}/kubectl" src: "{{ bin_dir }}/kubectl"
dest: "{{ bin_dir }}/kubectl" dest: "{{ artifacts_dir }}/kubectl"
flat: yes flat: yes
validate_checksum: no validate_checksum: no
become: no become: no
@@ -68,8 +68,6 @@
#!/bin/bash #!/bin/bash
kubectl --kubeconfig=admin.conf $@ kubectl --kubeconfig=admin.conf $@
dest: "{{ artifacts_dir }}/kubectl.sh" dest: "{{ artifacts_dir }}/kubectl.sh"
owner: root
group: root
mode: 0755 mode: 0755
become: no become: no
run_once: yes run_once: yes

View file

@@ -52,7 +52,7 @@ kube_apiserver_admission_control:
{%- if kube_version | version_compare('v1.9', '<') -%} {%- if kube_version | version_compare('v1.9', '<') -%}
GenericAdmissionWebhook GenericAdmissionWebhook
{%- else -%} {%- else -%}
ValidatingAdmissionWebhook MutatingAdmissionWebhook,ValidatingAdmissionWebhook
{%- endif -%} {%- endif -%}
- ResourceQuota - ResourceQuota
@@ -73,7 +73,9 @@ kube_oidc_auth: false
## Optional settings for OIDC ## Optional settings for OIDC
# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem # kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
# kube_oidc_username_claim: sub # kube_oidc_username_claim: sub
# kube_oidc_username_prefix: oidc:
# kube_oidc_groups_claim: groups # kube_oidc_groups_claim: groups
# kube_oidc_groups_prefix: oidc:
## Variables for custom flags ## Variables for custom flags
apiserver_custom_flags: [] apiserver_custom_flags: []
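
The two new knobs feed kube-apiserver's --oidc-username-prefix and --oidc-groups-prefix flags (wired into the apiserver manifest template below) so that OIDC identities cannot collide with built-in users and groups. A sketch with hypothetical issuer and claim values:

kube_oidc_auth: true
kube_oidc_url: https://dex.example.com
kube_oidc_client_id: kubernetes
kube_oidc_username_claim: email
kube_oidc_username_prefix: "oidc:"
kube_oidc_groups_claim: groups
kube_oidc_groups_prefix: "oidc:"

# A token carrying email=alice@example.com now authenticates as
# "oidc:alice@example.com", so RBAC bindings can target OIDC users explicitly.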

View file

@@ -9,6 +9,10 @@
- {src: apiserver-key.pem, dest: apiserver.key} - {src: apiserver-key.pem, dest: apiserver.key}
- {src: ca.pem, dest: ca.crt} - {src: ca.pem, dest: ca.crt}
- {src: ca-key.pem, dest: ca.key} - {src: ca-key.pem, dest: ca.key}
- {src: front-proxy-ca.pem, dest: front-proxy-ca.crt}
- {src: front-proxy-ca-key.pem, dest: front-proxy-ca.key}
- {src: front-proxy-client.pem, dest: front-proxy-client.crt}
- {src: front-proxy-client-key.pem, dest: front-proxy-client.key}
- {src: service-account-key.pem, dest: sa.pub} - {src: service-account-key.pem, dest: sa.pub}
- {src: service-account-key.pem, dest: sa.key} - {src: service-account-key.pem, dest: sa.key}
register: kubeadm_copy_old_certs register: kubeadm_copy_old_certs

View file

@@ -73,9 +73,15 @@ spec:
{% if kube_oidc_username_claim is defined %} {% if kube_oidc_username_claim is defined %}
- --oidc-username-claim={{ kube_oidc_username_claim }} - --oidc-username-claim={{ kube_oidc_username_claim }}
{% endif %} {% endif %}
{% if kube_oidc_username_prefix is defined %}
- "--oidc-username-prefix={{ kube_oidc_username_prefix }}"
{% endif %}
{% if kube_oidc_groups_claim is defined %} {% if kube_oidc_groups_claim is defined %}
- --oidc-groups-claim={{ kube_oidc_groups_claim }} - --oidc-groups-claim={{ kube_oidc_groups_claim }}
{% endif %} {% endif %}
{% if kube_oidc_groups_prefix is defined %}
- "--oidc-groups-prefix={{ kube_oidc_groups_prefix }}"
{% endif %}
{% endif %} {% endif %}
- --secure-port={{ kube_apiserver_port }} - --secure-port={{ kube_apiserver_port }}
- --insecure-port={{ kube_apiserver_insecure_port }} - --insecure-port={{ kube_apiserver_insecure_port }}
@@ -111,7 +117,7 @@ spec:
- --feature-gates={{ kube_feature_gates|join(',') }} - --feature-gates={{ kube_feature_gates|join(',') }}
{% endif %} {% endif %}
{% if kube_version | version_compare('v1.9', '>=') %} {% if kube_version | version_compare('v1.9', '>=') %}
- --requestheader-client-ca-file={{ kube_cert_dir }}/ca.pem - --requestheader-client-ca-file={{ kube_cert_dir }}/front-proxy-ca.pem
- --requestheader-allowed-names=front-proxy-client - --requestheader-allowed-names=front-proxy-client
- --requestheader-extra-headers-prefix=X-Remote-Extra- - --requestheader-extra-headers-prefix=X-Remote-Extra-
- --requestheader-group-headers=X-Remote-Group - --requestheader-group-headers=X-Remote-Group

View file

@@ -29,6 +29,7 @@ spec:
- --leader-elect=true - --leader-elect=true
- --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml - --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml
{% if volume_cross_zone_attachment %} {% if volume_cross_zone_attachment %}
- --use-legacy-policy-config
- --policy-config-file={{ kube_config_dir }}/kube-scheduler-policy.yaml - --policy-config-file={{ kube_config_dir }}/kube-scheduler-policy.yaml
{% endif %} {% endif %}
- --profiling=false - --profiling=false

View file

@@ -92,3 +92,48 @@ kube_cadvisor_port: 0
# The read-only port for the Kubelet to serve on with no authentication/authorization. # The read-only port for the Kubelet to serve on with no authentication/authorization.
kube_read_only_port: 0 kube_read_only_port: 0
# sysctl_file_path is the file that sysctl settings are written to
sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
# For the openstack integration the kubelet will need credentials to access
# openstack APIs like nova and cinder. By default these values will be
# read from the environment.
openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
openstack_username: "{{ lookup('env','OS_USERNAME') }}"
openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID')|default(lookup('env','OS_PROJECT_NAME'),true),true) }}"
openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
# For the vsphere integration, the kubelet will need credentials to access
# vsphere APIs.
# Documentation regarding these values can be found at
# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}"
vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}"
vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}"
vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}"
vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}"
vsphere_scsi_controller_type: pvscsi
# vsphere_public_network is the name of the network the VMs are joined to
vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values
#azure_tenant_id:
#azure_subscription_id:
#azure_aad_client_id:
#azure_aad_client_secret:
#azure_resource_group:
#azure_location:
#azure_subnet_name:
#azure_security_group_name:
#azure_vnet_name:
#azure_route_table_name:
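
Because these lookups run on the Ansible control host at template time, sourcing a standard openrc (or exporting the VSPHERE_* variables) before the playbook run is enough; any value can also be pinned in the inventory instead. A sketch with hypothetical values:

# Exported before running the playbook:
#   export OS_AUTH_URL=https://keystone.example.com:5000/v3
#   export OS_USERNAME=deployer
#   export OS_PASSWORD=secret
#   export OS_PROJECT_ID=0123456789abcdef
# ...or pinned in group_vars so the environment is not consulted:
openstack_auth_url: "https://keystone.example.com:5000/v3"
openstack_username: "deployer"
openstack_region: "RegionOne"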

View file

@@ -61,6 +61,7 @@
name: net.ipv4.ip_local_reserved_ports name: net.ipv4.ip_local_reserved_ports
value: "{{ kube_apiserver_node_port_range }}" value: "{{ kube_apiserver_node_port_range }}"
sysctl_set: yes sysctl_set: yes
sysctl_file: "{{ sysctl_file_path }}"
state: present state: present
reload: yes reload: yes
when: kube_apiserver_node_port_range is defined when: kube_apiserver_node_port_range is defined
@@ -96,6 +97,7 @@
sysctl: sysctl:
name: "{{ item }}" name: "{{ item }}"
state: present state: present
sysctl_file: "{{ sysctl_file_path }}"
value: 1 value: 1
reload: yes reload: yes
when: sysctl_bridge_nf_call_iptables.rc == 0 when: sysctl_bridge_nf_call_iptables.rc == 0
@@ -118,6 +120,19 @@
tags: tags:
- kube-proxy - kube-proxy
- name: Persist ip_vs modules
copy:
dest: /etc/modules-load.d/kube_proxy-ipvs.conf
content: |
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
when: kube_proxy_mode == 'ipvs'
tags:
- kube-proxy
- name: Write proxy manifest - name: Write proxy manifest
template: template:
src: manifests/kube-proxy.manifest.j2 src: manifests/kube-proxy.manifest.j2
@@ -134,6 +149,14 @@
tags: tags:
- kube-proxy - kube-proxy
- include_tasks: "{{ cloud_provider }}-credential-check.yml"
when:
- cloud_provider is defined
- cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
tags:
- cloud-provider
- facts
- name: Write cloud-config - name: Write cloud-config
template: template:
src: "{{ cloud_provider }}-cloud-config.j2" src: "{{ cloud_provider }}-cloud-config.j2"
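
One caveat to the persistence task above: /etc/modules-load.d/ is only processed by systemd-modules-load at boot, so the list does not affect the running kernel by itself. A hedged sketch of a companion task that would load the modules immediately (same names; on kernels 4.19 and later nf_conntrack_ipv4 is merged into nf_conntrack):

- name: Load ip_vs modules into the running kernel (illustrative)
  modprobe:
    name: "{{ item }}"
    state: present
  with_items:
    - ip_vs
    - ip_vs_rr
    - ip_vs_wrr
    - ip_vs_sh
    - nf_conntrack_ipv4
  when: kube_proxy_mode == 'ipvs'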

View file

@@ -24,6 +24,15 @@
-v /var/lib/kubelet:/var/lib/kubelet:shared \ -v /var/lib/kubelet:/var/lib/kubelet:shared \
-v /var/lib/cni:/var/lib/cni:shared \ -v /var/lib/cni:/var/lib/cni:shared \
-v /var/run:/var/run:rw \ -v /var/run:/var/run:rw \
{# we can run into issues with double mounting /var/lib/kubelet #}
{# surely there's a better way to do this #}
{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
-v {{ kubelet_flexvolumes_plugins_dir }}:{{ kubelet_flexvolumes_plugins_dir }}:rw \
{% endif -%}
{% if local_volume_provisioner_enabled -%}
-v {{ local_volume_provisioner_base_dir }}:{{ local_volume_provisioner_base_dir }}:rw \
-v {{ local_volume_provisioner_mount_dir }}:{{ local_volume_provisioner_mount_dir }}:rw \
{% endif %}
-v {{kube_config_dir}}:{{kube_config_dir}}:ro \ -v {{kube_config_dir}}:{{kube_config_dir}}:ro \
-v /etc/os-release:/etc/os-release:ro \ -v /etc/os-release:/etc/os-release:ro \
{{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \ {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \

View file

@@ -23,9 +23,7 @@ ExecStart={{ bin_dir }}/kubelet \
Restart=always Restart=always
RestartSec=10s RestartSec=10s
ExecStartPre=-{{ docker_bin_dir }}/docker rm -f kubelet ExecStartPre=-{{ docker_bin_dir }}/docker rm -f kubelet
{% if kubelet_flexvolumes_plugins_dir is defined %}
ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }} ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
{% endif %}
ExecReload={{ docker_bin_dir }}/docker restart kubelet ExecReload={{ docker_bin_dir }}/docker restart kubelet

View file

@@ -7,9 +7,7 @@ Wants=docker.socket
[Service] [Service]
User=root User=root
EnvironmentFile=-{{kube_config_dir}}/kubelet.env EnvironmentFile=-{{kube_config_dir}}/kubelet.env
{% if kubelet_flexvolumes_plugins_dir is defined %}
ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }} ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
{% endif %}
ExecStart={{ bin_dir }}/kubelet \ ExecStart={{ bin_dir }}/kubelet \
$KUBE_LOGTOSTDERR \ $KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \ $KUBE_LOG_LEVEL \

View file

@@ -12,10 +12,7 @@ LimitNOFILE=40000
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet.uuid ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet.uuid
ExecStartPre=-/bin/mkdir -p /var/lib/kubelet ExecStartPre=-/bin/mkdir -p /var/lib/kubelet
{% if kubelet_flexvolumes_plugins_dir is defined %}
ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }} ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
{% endif %}
EnvironmentFile={{kube_config_dir}}/kubelet.env EnvironmentFile={{kube_config_dir}}/kubelet.env
# stage1-fly mounts /proc /sys /dev so no need to duplicate the mounts # stage1-fly mounts /proc /sys /dev so no need to duplicate the mounts
@@ -41,8 +38,17 @@ ExecStart=/usr/bin/rkt run \
--volume etc-cni,kind=host,source=/etc/cni,readOnly=true \ --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
--volume opt-cni,kind=host,source=/opt/cni,readOnly=true \ --volume opt-cni,kind=host,source=/opt/cni,readOnly=true \
--volume var-lib-cni,kind=host,source=/var/lib/cni,readOnly=false \ --volume var-lib-cni,kind=host,source=/var/lib/cni,readOnly=false \
{% if kubelet_flexvolumes_plugins_dir is defined %} {# we can run into issues with double mounting /var/lib/kubelet #}
{# surely there's a better way to do this #}
{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
--volume flexvolumes,kind=host,source={{ kubelet_flexvolumes_plugins_dir }},readOnly=false \ --volume flexvolumes,kind=host,source={{ kubelet_flexvolumes_plugins_dir }},readOnly=false \
{% endif -%}
{% if local_volume_provisioner_enabled %}
--volume local-volume-provisioner-base-dir,kind=host,source={{ local_volume_provisioner_base_dir }},readOnly=false \
{# Not pretty, but needed to avoid double mount #}
{% if local_volume_provisioner_base_dir not in local_volume_provisioner_mount_dir and local_volume_provisioner_mount_dir not in local_volume_provisioner_base_dir %}
--volume local-volume-provisioner-mount-dir,kind=host,source={{ local_volume_provisioner_mount_dir }},readOnly=false \
{% endif %}
{% endif %} {% endif %}
{% if kubelet_load_modules == true %} {% if kubelet_load_modules == true %}
--mount volume=modprobe,target=/usr/sbin/modprobe \ --mount volume=modprobe,target=/usr/sbin/modprobe \
@@ -65,8 +71,17 @@ ExecStart=/usr/bin/rkt run \
--mount volume=var-lib-kubelet,target=/var/lib/kubelet \ --mount volume=var-lib-kubelet,target=/var/lib/kubelet \
--mount volume=var-log,target=/var/log \ --mount volume=var-log,target=/var/log \
--mount volume=hosts,target=/etc/hosts \ --mount volume=hosts,target=/etc/hosts \
{% if kubelet_flexvolumes_plugins_dir is defined %} {# we can run into issues with double mounting /var/lib/kubelet #}
{# surely there's a better way to do this #}
{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
--mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \ --mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \
{% endif -%}
{% if local_volume_provisioner_enabled %}
--mount volume=local-volume-provisioner-base-dir,target={{ local_volume_provisioner_base_dir }} \
{# Not pretty, but needed to avoid double mount #}
{% if local_volume_provisioner_base_dir not in local_volume_provisioner_mount_dir and local_volume_provisioner_mount_dir not in local_volume_provisioner_base_dir %}
--mount volume=local-volume-provisioner-mount-dir,target={{ local_volume_provisioner_mount_dir }} \
{% endif %}
{% endif %} {% endif %}
--stage1-from-dir=stage1-fly.aci \ --stage1-from-dir=stage1-fly.aci \
{% if kube_hyperkube_image_repo == "docker" %} {% if kube_hyperkube_image_repo == "docker" %}

View file

@@ -83,20 +83,20 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{# Kubelet node labels #} {# Kubelet node labels #}
{% set role_node_labels = [] %} {% set role_node_labels = [] %}
{% if inventory_hostname in groups['kube-master'] %} {% if inventory_hostname in groups['kube-master'] %}
{% do role_node_labels.append('node-role.kubernetes.io/master=true') %} {% set dummy = role_node_labels.append('node-role.kubernetes.io/master=true') %}
{% if not standalone_kubelet|bool %} {% if not standalone_kubelet|bool %}
{% do role_node_labels.append('node-role.kubernetes.io/node=true') %} {% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
{% endif %} {% endif %}
{% else %} {% else %}
{% do role_node_labels.append('node-role.kubernetes.io/node=true') %} {% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
{% endif %} {% endif %}
{% if inventory_hostname in groups['kube-ingress']|default([]) %} {% if inventory_hostname in groups['kube-ingress']|default([]) %}
{% do role_node_labels.append('node-role.kubernetes.io/ingress=true') %} {% set dummy = role_node_labels.append('node-role.kubernetes.io/ingress=true') %}
{% endif %} {% endif %}
{% set inventory_node_labels = [] %} {% set inventory_node_labels = [] %}
{% if node_labels is defined %} {% if node_labels is defined %}
{% for labelname, labelvalue in node_labels.iteritems() %} {% for labelname, labelvalue in node_labels.iteritems() %}
{% do inventory_node_labels.append(labelname + '=' + labelvalue) %} {% set dummy = inventory_node_labels.append('%s=%s'|format(labelname, labelvalue)) %}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% set all_node_labels = role_node_labels + inventory_node_labels %} {% set all_node_labels = role_node_labels + inventory_node_labels %}
@@ -110,9 +110,7 @@ DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock"
KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet" KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"
{% endif %} {% endif %}
{% if kubelet_flexvolumes_plugins_dir is defined %}
KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}" KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"
{% endif %}
# Should this cluster be allowed to run privileged docker containers # Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=true" KUBE_ALLOW_PRIV="--allow-privileged=true"
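
The {% do %} statements were dropped because do is an opt-in Jinja2 extension (jinja2.ext.do) that Ansible's templating does not enable by default; assigning the return value of append() to a throwaway variable has the same side effect using core Jinja2 only. The remaining iteritems() call carries the same Python 2 caveat as the etcd template. The pattern in isolation:

{% set labels = [] %}
{% set dummy = labels.append('node-role.kubernetes.io/node=true') %}
{# labels == ['node-role.kubernetes.io/node=true']; dummy is None #}
--node-labels={{ labels | join(',') }}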

View file

@@ -8,7 +8,7 @@ epel_enabled: false
common_required_pkgs: common_required_pkgs:
- python-httplib2 - python-httplib2
- openssl - "{{ (ansible_distribution == 'openSUSE Tumbleweed') | ternary('openssl-1_1_0', 'openssl') }}"
- curl - curl
- rsync - rsync
- bash-completion - bash-completion
@@ -23,35 +23,6 @@ disable_ipv6_dns: false
kube_cert_group: kube-cert kube_cert_group: kube-cert
kube_config_dir: /etc/kubernetes kube_config_dir: /etc/kubernetes
# For the openstack integration kubelet will need credentials to access
# openstack apis like nova and cinder. Per default this values will be
# read from the environment.
openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
openstack_username: "{{ lookup('env','OS_USERNAME') }}"
openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true) }}"
openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
# For the vsphere integration, kubelet will need credentials to access
# vsphere apis
# Documentation regarding these values can be found
# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}"
vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}"
vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}"
vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}"
vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}"
vsphere_scsi_controller_type: pvscsi
# vsphere_public_network is name of the network the VMs are joined to
vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
# Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content # Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
# for hostnet pods and infra needs # for hostnet pods and infra needs
resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
@@ -60,3 +31,5 @@ resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
populate_inventory_to_hosts_file: true populate_inventory_to_hosts_file: true
preinstall_selinux_state: permissive preinstall_selinux_state: permissive
sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"

View file

@@ -15,7 +15,7 @@
notify: Preinstall | restart network notify: Preinstall | restart network
when: dhclientconffile is defined when: dhclientconffile is defined
- name: Configue dhclient hooks for resolv.conf (non-RH) - name: Configure dhclient hooks for resolv.conf (non-RH)
template: template:
src: dhclient_dnsupdate.sh.j2 src: dhclient_dnsupdate.sh.j2
dest: "{{ dhclienthookfile }}" dest: "{{ dhclienthookfile }}"
@@ -24,7 +24,7 @@
notify: Preinstall | restart network notify: Preinstall | restart network
when: ansible_os_family != "RedHat" when: ansible_os_family != "RedHat"
- name: Configue dhclient hooks for resolv.conf (RH-only) - name: Configure dhclient hooks for resolv.conf (RH-only)
template: template:
src: dhclient_dnsupdate_rh.sh.j2 src: dhclient_dnsupdate_rh.sh.j2
dest: "{{ dhclienthookfile }}" dest: "{{ dhclienthookfile }}"

View file

@@ -3,6 +3,12 @@
tags: tags:
- asserts - asserts
# This is run before bin_dir is pinned because these tasks are run on localhost
- import_tasks: pre_upgrade.yml
run_once: true
tags:
- upgrade
- name: Force binaries directory for Container Linux by CoreOS - name: Force binaries directory for Container Linux by CoreOS
set_fact: set_fact:
bin_dir: "/opt/bin" bin_dir: "/opt/bin"
@@ -71,14 +77,6 @@
- cloud-provider - cloud-provider
- facts - facts
- include_tasks: "{{ cloud_provider }}-credential-check.yml"
when:
- cloud_provider is defined
- cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
tags:
- cloud-provider
- facts
- name: Create cni directories - name: Create cni directories
file: file:
path: "{{ item }}" path: "{{ item }}"
@@ -99,6 +97,20 @@
- contiv - contiv
- bootstrap-os - bootstrap-os
- name: Create local volume provisioner directories
file:
path: "{{ item }}"
state: directory
owner: kube
with_items:
- "{{ local_volume_provisioner_base_dir }}"
- "{{ local_volume_provisioner_mount_dir }}"
when:
- inventory_hostname in groups['k8s-cluster']
- local_volume_provisioner_enabled
tags:
- persistent_volumes
- import_tasks: resolvconf.yml - import_tasks: resolvconf.yml
when: when:
- dns_mode != 'none' - dns_mode != 'none'
@@ -146,6 +158,15 @@
- not is_atomic - not is_atomic
tags: bootstrap-os tags: bootstrap-os
- name: Update package management cache (zypper) - SUSE
shell: zypper -n --gpg-auto-import-keys ref
register: make_cache_output
until: make_cache_output|succeeded
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when:
- ansible_pkg_mgr == 'zypper'
tags: bootstrap-os
- name: Update package management cache (APT) - name: Update package management cache (APT)
apt: apt:
@@ -224,12 +245,6 @@
tags: tags:
- bootstrap-os - bootstrap-os
- name: set default sysctl file path
set_fact:
sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
tags:
- bootstrap-os
- name: Stat sysctl file configuration - name: Stat sysctl file configuration
stat: stat:
path: "{{sysctl_file_path}}" path: "{{sysctl_file_path}}"

View file

@@ -0,0 +1,28 @@
---
- name: "Pre-upgrade | check if old credential dir exists"
local_action:
module: stat
path: "{{ inventory_dir }}/../credentials"
vars:
ansible_python_interpreter: "/usr/bin/env python"
register: old_credential_dir
become: no
- name: "Pre-upgrade | check if new credential dir exists"
local_action:
module: stat
path: "{{ inventory_dir }}/credentials"
vars:
ansible_python_interpreter: "/usr/bin/env python"
register: new_credential_dir
become: no
when: old_credential_dir.stat.exists
- name: "Pre-upgrade | move data from old credential dir to new"
local_action: command mv {{ inventory_dir }}/../credentials {{ inventory_dir }}/credentials
args:
creates: "{{ inventory_dir }}/credentials"
vars:
ansible_python_interpreter: "/usr/bin/env python"
become: no
when: old_credential_dir.stat.exists and not new_credential_dir.stat.exists

View file

@@ -12,7 +12,7 @@
- name: Stop if unknown OS - name: Stop if unknown OS
assert: assert:
that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'CoreOS', 'Container Linux by CoreOS'] that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'CoreOS', 'Container Linux by CoreOS', 'openSUSE Leap', 'openSUSE Tumbleweed']
ignore_errors: "{{ ignore_assert_errors }}" ignore_errors: "{{ ignore_assert_errors }}"
- name: Stop if unknown network plugin - name: Stop if unknown network plugin

View file

@@ -0,0 +1,4 @@
---
required_pkgs:
- device-mapper
- ebtables

View file

@@ -1,3 +1,4 @@
--- ---
kube_cert_group: kube-cert kube_cert_group: kube-cert
kube_vault_mount_path: kube kube_vault_mount_path: kube
front_proxy_vault_mount_path: front-proxy

View file

@@ -72,6 +72,15 @@ else
openssl req -x509 -new -nodes -key ca-key.pem -days 36500 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1 openssl req -x509 -new -nodes -key ca-key.pem -days 36500 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
fi fi
# Front proxy client CA
if [ -e "$SSLDIR/front-proxy-ca-key.pem" ]; then
# Reuse existing front proxy CA
cp $SSLDIR/{front-proxy-ca.pem,front-proxy-ca-key.pem} .
else
openssl genrsa -out front-proxy-ca-key.pem 2048 > /dev/null 2>&1
openssl req -x509 -new -nodes -key front-proxy-ca-key.pem -days 36500 -out front-proxy-ca.pem -subj "/CN=front-proxy-ca" > /dev/null 2>&1
fi
gen_key_and_cert() { gen_key_and_cert() {
local name=$1 local name=$1
local subject=$2 local subject=$2
@@ -80,6 +89,14 @@ gen_key_and_cert() {
openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 36500 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1 openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 36500 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
} }
gen_key_and_cert_front_proxy() {
local name=$1
local subject=$2
openssl genrsa -out ${name}-key.pem 2048 > /dev/null 2>&1
openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1
openssl x509 -req -in ${name}.csr -CA front-proxy-ca.pem -CAkey front-proxy-ca-key.pem -CAcreateserial -out ${name}.pem -days 36500 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
}
# Admins # Admins
if [ -n "$MASTERS" ]; then if [ -n "$MASTERS" ]; then
@@ -105,7 +122,7 @@ if [ -n "$MASTERS" ]; then
# kube-controller-manager # kube-controller-manager
gen_key_and_cert "kube-controller-manager" "/CN=system:kube-controller-manager" gen_key_and_cert "kube-controller-manager" "/CN=system:kube-controller-manager"
# metrics aggregator # metrics aggregator
gen_key_and_cert "front-proxy-client" "/CN=front-proxy-client" gen_key_and_cert_front_proxy "front-proxy-client" "/CN=front-proxy-client"
for host in $MASTERS; do for host in $MASTERS; do
cn="${host%%.*}" cn="${host%%.*}"

View file

@@ -48,8 +48,11 @@
'{{ kube_cert_dir }}/kube-scheduler-key.pem', '{{ kube_cert_dir }}/kube-scheduler-key.pem',
'{{ kube_cert_dir }}/kube-controller-manager.pem', '{{ kube_cert_dir }}/kube-controller-manager.pem',
'{{ kube_cert_dir }}/kube-controller-manager-key.pem', '{{ kube_cert_dir }}/kube-controller-manager-key.pem',
'{{ kube_cert_dir }}/front-proxy-ca.pem',
'{{ kube_cert_dir }}/front-proxy-ca-key.pem',
'{{ kube_cert_dir }}/front-proxy-client.pem', '{{ kube_cert_dir }}/front-proxy-client.pem',
'{{ kube_cert_dir }}/front-proxy-client-key.pem', '{{ kube_cert_dir }}/front-proxy-client-key.pem',
'{{ kube_cert_dir }}/service-account-key.pem',
{% for host in groups['kube-master'] %} {% for host in groups['kube-master'] %}
'{{ kube_cert_dir }}/admin-{{ host }}.pem' '{{ kube_cert_dir }}/admin-{{ host }}.pem'
'{{ kube_cert_dir }}/admin-{{ host }}-key.pem' '{{ kube_cert_dir }}/admin-{{ host }}-key.pem'
@@ -71,7 +74,9 @@
{% for cert in ['apiserver.pem', 'apiserver-key.pem', {% for cert in ['apiserver.pem', 'apiserver-key.pem',
'kube-scheduler.pem','kube-scheduler-key.pem', 'kube-scheduler.pem','kube-scheduler-key.pem',
'kube-controller-manager.pem','kube-controller-manager-key.pem', 'kube-controller-manager.pem','kube-controller-manager-key.pem',
'front-proxy-client.pem','front-proxy-client-key.pem'] -%} 'front-proxy-ca.pem','front-proxy-ca-key.pem',
'front-proxy-client.pem','front-proxy-client-key.pem',
'service-account-key.pem'] -%}
{% set cert_file = "%s/%s.pem"|format(kube_cert_dir, cert) %} {% set cert_file = "%s/%s.pem"|format(kube_cert_dir, cert) %}
{% if not cert_file in existing_certs -%} {% if not cert_file in existing_certs -%}
{%- set gen = True -%} {%- set gen = True -%}

View file

@@ -73,6 +73,8 @@
'kube-scheduler-key.pem', 'kube-scheduler-key.pem',
'kube-controller-manager.pem', 'kube-controller-manager.pem',
'kube-controller-manager-key.pem', 'kube-controller-manager-key.pem',
'front-proxy-ca.pem',
'front-proxy-ca-key.pem',
'front-proxy-client.pem', 'front-proxy-client.pem',
'front-proxy-client-key.pem', 'front-proxy-client-key.pem',
'service-account-key.pem', 'service-account-key.pem',
@@ -85,6 +87,8 @@
'admin-{{ inventory_hostname }}-key.pem', 'admin-{{ inventory_hostname }}-key.pem',
'apiserver.pem', 'apiserver.pem',
'apiserver-key.pem', 'apiserver-key.pem',
'front-proxy-ca.pem',
'front-proxy-ca-key.pem',
'front-proxy-client.pem', 'front-proxy-client.pem',
'front-proxy-client-key.pem', 'front-proxy-client-key.pem',
'service-account-key.pem', 'service-account-key.pem',

View file

@@ -52,6 +52,11 @@
"{{ hostvars[host]['ip'] }}", "{{ hostvars[host]['ip'] }}",
{%- endif -%} {%- endif -%}
{%- endfor -%} {%- endfor -%}
{%- if supplementary_addresses_in_ssl_keys is defined -%}
{%- for ip_item in supplementary_addresses_in_ssl_keys -%}
"{{ ip_item }}",
{%- endfor -%}
{%- endif -%}
"127.0.0.1","::1","{{ kube_apiserver_ip }}" "127.0.0.1","::1","{{ kube_apiserver_ip }}"
] ]
issue_cert_path: "{{ item }}" issue_cert_path: "{{ item }}"
@@ -98,6 +103,8 @@
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml - include_tasks: ../../../vault/tasks/shared/issue_cert.yml
vars: vars:
issue_cert_common_name: "front-proxy-client" issue_cert_common_name: "front-proxy-client"
issue_cert_copy_ca: "{{ item == kube_front_proxy_clients_certs_needed|first }}"
issue_cert_ca_filename: front-proxy-ca.pem
issue_cert_alt_names: "{{ kube_cert_alt_names }}" issue_cert_alt_names: "{{ kube_cert_alt_names }}"
issue_cert_file_group: "{{ kube_cert_group }}" issue_cert_file_group: "{{ kube_cert_group }}"
issue_cert_file_owner: kube issue_cert_file_owner: kube
@@ -110,12 +117,17 @@
"{{ hostvars[host]['ip'] }}", "{{ hostvars[host]['ip'] }}",
{%- endif -%} {%- endif -%}
{%- endfor -%} {%- endfor -%}
{%- if supplementary_addresses_in_ssl_keys is defined -%}
{%- for ip_item in supplementary_addresses_in_ssl_keys -%}
"{{ ip_item }}",
{%- endfor -%}
{%- endif -%}
"127.0.0.1","::1","{{ kube_apiserver_ip }}" "127.0.0.1","::1","{{ kube_apiserver_ip }}"
] ]
issue_cert_path: "{{ item }}" issue_cert_path: "{{ item }}"
issue_cert_role: front-proxy-client issue_cert_role: front-proxy-client
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
issue_cert_mount_path: "{{ kube_vault_mount_path }}" issue_cert_mount_path: "{{ front_proxy_vault_mount_path }}"
with_items: "{{ kube_front_proxy_clients_certs_needed|d([]) }}" with_items: "{{ kube_front_proxy_clients_certs_needed|d([]) }}"
when: inventory_hostname in groups['kube-master'] when: inventory_hostname in groups['kube-master']
notify: set secret_changed notify: set secret_changed
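
supplementary_addresses_in_ssl_keys lets addresses that are invisible to the inventory, such as a load-balancer VIP or a NATed public IP, be baked into the apiserver certificate SANs so clients reaching the API through them do not hit TLS errors. A sketch with hypothetical addresses:

supplementary_addresses_in_ssl_keys:
  - 203.0.113.10   # external load-balancer VIP
  - 192.0.2.50     # floating IP used by operators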

View file

@@ -32,7 +32,7 @@
sync_file_hosts: "{{ groups['kube-master'] }}" sync_file_hosts: "{{ groups['kube-master'] }}"
sync_file_is_cert: true sync_file_is_cert: true
sync_file_owner: kube sync_file_owner: kube
with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem"] with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem", "service-account.pem"]
- name: sync_kube_master_certs | Set facts for kube master components sync_file results - name: sync_kube_master_certs | Set facts for kube master components sync_file results
set_fact: set_fact:
@@ -44,6 +44,18 @@
set_fact: set_fact:
sync_file_results: [] sync_file_results: []
- include_tasks: ../../../vault/tasks/shared/sync_file.yml
vars:
sync_file: front-proxy-ca.pem
sync_file_dir: "{{ kube_cert_dir }}"
sync_file_group: "{{ kube_cert_group }}"
sync_file_hosts: "{{ groups['kube-master'] }}"
sync_file_owner: kube
- name: sync_kube_master_certs | Unset sync_file_results after front-proxy-ca.pem
set_fact:
sync_file_results: []
- include_tasks: ../../../vault/tasks/shared/sync_file.yml - include_tasks: ../../../vault/tasks/shared/sync_file.yml
vars: vars:
sync_file: "{{ item }}" sync_file: "{{ item }}"

View file

@@ -8,6 +8,8 @@
/etc/pki/ca-trust/source/anchors/kube-ca.crt /etc/pki/ca-trust/source/anchors/kube-ca.crt
{%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
/etc/ssl/certs/kube-ca.pem /etc/ssl/certs/kube-ca.pem
{%- elif ansible_os_family == "Suse" -%}
/etc/pki/trust/anchors/kube-ca.pem
{%- endif %} {%- endif %}
tags: tags:
- facts - facts
@@ -19,9 +21,9 @@
remote_src: true remote_src: true
register: kube_ca_cert register: kube_ca_cert
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS) - name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS)
command: update-ca-certificates command: update-ca-certificates
when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"] when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"]
- name: Gen_certs | update ca-certificates (RedHat) - name: Gen_certs | update ca-certificates (RedHat)
command: update-ca-trust extract command: update-ca-trust extract

View file

@@ -5,7 +5,7 @@ bootstrap_os: none
# Use ProxyCommand if bastion host is in group all # Use ProxyCommand if bastion host is in group all
# This change obsoletes editing the ansible.cfg file depending on bastion existence # This change obsoletes editing the ansible.cfg file depending on bastion existence
ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ ansible_user }}@{{hostvars['bastion']['ansible_host']}} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}" ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ hostvars['bastion']['ansible_user'] }}@{{ hostvars['bastion']['ansible_host'] }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}"
kube_api_anonymous_auth: false kube_api_anonymous_auth: false
@@ -129,6 +129,10 @@ kube_apiserver_insecure_port: 8080
# Aggregator # Aggregator
kube_api_aggregator_routing: false kube_api_aggregator_routing: false
# Docker options
# Optionally do not run docker role
manage_docker: true
# Path used to store Docker data # Path used to store Docker data
docker_daemon_graph: "/var/lib/docker" docker_daemon_graph: "/var/lib/docker"
@@ -219,6 +223,10 @@ vault_config_dir: "{{ vault_base_dir }}/config"
vault_roles_dir: "{{ vault_base_dir }}/roles" vault_roles_dir: "{{ vault_base_dir }}/roles"
vault_secrets_dir: "{{ vault_base_dir }}/secrets" vault_secrets_dir: "{{ vault_base_dir }}/secrets"
# Local volume provisioner dirs
local_volume_provisioner_base_dir: /mnt/disks
local_volume_provisioner_mount_dir: /mnt/disks
## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. ## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-incubator/kubespray/issues/2141 ## See https://github.com/kubernetes-incubator/kubespray/issues/2141
## Set this variable to true to get rid of this issue ## Set this variable to true to get rid of this issue
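
Setting manage_docker: false skips the docker role entirely (see the matching when: added to cluster.yml below), which suits hosts that ship a site-managed container runtime kubespray should leave untouched:

manage_docker: false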

View file

@@ -1,7 +1,7 @@
--- ---
# Limits # Limits
weave_memory_limits: 400M weave_memory_limits: 400M
weave_cpu_limits: 30m weave_cpu_limits: 300m
weave_memory_requests: 64M weave_memory_requests: 64M
weave_cpu_requests: 10m weave_cpu_requests: 10m
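
CPU limits are in millicores, so the old 30m capped Weave at 3% of one core, which is easily exhausted on busy nodes; 300m allows 0.3 core while the 10m request keeps scheduling cheap (the CI var files below drop their 100m overrides accordingly). Rendered into the DaemonSet, the defaults become roughly:

resources:
  requests:
    cpu: 10m
    memory: 64M
  limits:
    cpu: 300m
    memory: 400M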

View file

@@ -34,3 +34,13 @@
retries: 4 retries: 4
delay: "{{ retry_stagger | random + 3 }}" delay: "{{ retry_stagger | random + 3 }}"
when: ansible_os_family == "RedHat" when: ansible_os_family == "RedHat"
- name: install rkt pkg on openSUSE
zypper:
name: "{{ rkt_download_url }}/{{ rkt_pkg_name }}"
state: present
register: rkt_task_result
until: rkt_task_result|succeeded
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when: ansible_os_family == "Suse"

2
roles/rkt/vars/suse.yml Normal file
View file

@@ -0,0 +1,2 @@
---
rkt_pkg_name: "rkt-{{ rkt_pkg_version }}.x86_64.rpm"

View file

@@ -97,6 +97,11 @@ vault_ca_options:
format: pem format: pem
ttl: "{{ vault_max_lease_ttl }}" ttl: "{{ vault_max_lease_ttl }}"
exclude_cn_from_sans: true exclude_cn_from_sans: true
front_proxy:
common_name: front-proxy
format: pem
ttl: "{{ vault_max_lease_ttl }}"
exclude_cn_from_sans: true
vault_client_headers: vault_client_headers:
Accept: "application/json" Accept: "application/json"
@@ -164,9 +169,16 @@ vault_pki_mounts:
allow_any_name: true allow_any_name: true
enforce_hostnames: false enforce_hostnames: false
organization: "system:node-proxier" organization: "system:node-proxier"
front_proxy:
name: front-proxy
default_lease_ttl: "{{ vault_default_lease_ttl }}"
max_lease_ttl: "{{ vault_max_lease_ttl }}"
description: "Kubernetes Front Proxy CA"
cert_dir: "{{ vault_kube_cert_dir }}"
roles:
- name: front-proxy-client - name: front-proxy-client
group: k8s-cluster group: k8s-cluster
password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy.creds length=15') }}" password: "{{ lookup('password', inventory_dir + '/credentials/vault/front-proxy-client.creds length=15') }}"
policy_rules: default policy_rules: default
role_options: role_options:
allow_any_name: true allow_any_name: true

View file

@@ -57,6 +57,7 @@
gen_ca_mount_path: "{{ vault_pki_mounts.etcd.name }}" gen_ca_mount_path: "{{ vault_pki_mounts.etcd.name }}"
gen_ca_vault_headers: "{{ vault_headers }}" gen_ca_vault_headers: "{{ vault_headers }}"
gen_ca_vault_options: "{{ vault_ca_options.etcd }}" gen_ca_vault_options: "{{ vault_ca_options.etcd }}"
gen_ca_copy_group: "etcd"
when: inventory_hostname in groups.etcd and vault_etcd_ca_cert_needed when: inventory_hostname in groups.etcd and vault_etcd_ca_cert_needed
- import_tasks: gen_vault_certs.yml - import_tasks: gen_vault_certs.yml

View file

@@ -6,8 +6,9 @@
create_mount_max_lease_ttl: "{{ item.max_lease_ttl }}" create_mount_max_lease_ttl: "{{ item.max_lease_ttl }}"
create_mount_description: "{{ item.description }}" create_mount_description: "{{ item.description }}"
create_mount_cert_dir: "{{ item.cert_dir }}" create_mount_cert_dir: "{{ item.cert_dir }}"
create_mount_config_ca_needed: item.name != vault_pki_mounts.kube.name create_mount_config_ca_needed: item.name != vault_pki_mounts.kube.name and item.name != vault_pki_mounts.front_proxy.name
with_items: with_items:
- "{{ vault_pki_mounts.vault }}" - "{{ vault_pki_mounts.vault }}"
- "{{ vault_pki_mounts.etcd }}" - "{{ vault_pki_mounts.etcd }}"
- "{{ vault_pki_mounts.kube }}" - "{{ vault_pki_mounts.kube }}"
- "{{ vault_pki_mounts.front_proxy }}"

View file

@@ -32,6 +32,15 @@
gen_ca_mount_path: "{{ vault_pki_mounts.kube.name }}" gen_ca_mount_path: "{{ vault_pki_mounts.kube.name }}"
gen_ca_vault_headers: "{{ vault_headers }}" gen_ca_vault_headers: "{{ vault_headers }}"
gen_ca_vault_options: "{{ vault_ca_options.kube }}" gen_ca_vault_options: "{{ vault_ca_options.kube }}"
gen_ca_copy_group: "kube-master"
when: inventory_hostname in groups.vault
- include_tasks: ../shared/gen_ca.yml
vars:
gen_ca_cert_dir: "{{ vault_pki_mounts.front_proxy.cert_dir }}"
gen_ca_mount_path: "{{ vault_pki_mounts.front_proxy.name }}"
gen_ca_vault_headers: "{{ vault_headers }}"
gen_ca_vault_options: "{{ vault_ca_options.front_proxy }}"
when: inventory_hostname in groups.vault when: inventory_hostname in groups.vault
- include_tasks: ../shared/auth_backend.yml - include_tasks: ../shared/auth_backend.yml
@@ -46,6 +55,7 @@
- "{{ vault_pki_mounts.vault }}" - "{{ vault_pki_mounts.vault }}"
- "{{ vault_pki_mounts.etcd }}" - "{{ vault_pki_mounts.etcd }}"
- "{{ vault_pki_mounts.kube }}" - "{{ vault_pki_mounts.kube }}"
- "{{ vault_pki_mounts.front_proxy }}"
loop_control: loop_control:
loop_var: mount loop_var: mount
when: inventory_hostname in groups.vault when: inventory_hostname in groups.vault

View file

@@ -24,9 +24,12 @@
mode: 0644 mode: 0644
when: vault_ca_gen.status == 200 when: vault_ca_gen.status == 200
- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA key locally"
- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA key to necessary hosts"
copy: copy:
content: "{{ hostvars[groups.vault|first]['vault_ca_gen']['json']['data']['private_key'] }}" content: "{{ hostvars[groups.vault|first]['vault_ca_gen']['json']['data']['private_key'] }}"
dest: "{{ gen_ca_cert_dir }}/ca-key.pem" dest: "{{ gen_ca_cert_dir }}/ca-key.pem"
mode: 0640 mode: 0640
when: vault_ca_gen.status == 200 when: vault_ca_gen.status == 200
delegate_to: "{{ item }}"
with_items: "{{ (groups[gen_ca_copy_group|default('vault')]) | union(groups['vault']) }}"

View file

@@ -6,6 +6,7 @@
# issue_cert_alt_name: Requested Subject Alternative Names, in a list. # issue_cert_alt_name: Requested Subject Alternative Names, in a list.
# issue_cert_common_name: Common Name included in the cert # issue_cert_common_name: Common Name included in the cert
# issue_cert_copy_ca: Copy issuing CA cert needed # issue_cert_copy_ca: Copy issuing CA cert needed
# issue_cert_ca_filename: Filename for copied issuing CA cert (default ca.pem)
# issue_cert_dir_mode: Mode of the placed cert directory # issue_cert_dir_mode: Mode of the placed cert directory
# issue_cert_file_group: Group of the placed cert file and directory # issue_cert_file_group: Group of the placed cert file and directory
# issue_cert_file_mode: Mode of the placed cert file # issue_cert_file_mode: Mode of the placed cert file
@@ -100,7 +101,7 @@
- name: issue_cert | Copy issuing CA cert - name: issue_cert | Copy issuing CA cert
copy: copy:
content: "{{ issue_cert_result['json']['data']['issuing_ca'] }}\n" content: "{{ issue_cert_result['json']['data']['issuing_ca'] }}\n"
dest: "{{ issue_cert_path | dirname }}/ca.pem" dest: "{{ issue_cert_path | dirname }}/{{ issue_cert_ca_filename | default('ca.pem') }}"
group: "{{ issue_cert_file_group | d('root' )}}" group: "{{ issue_cert_file_group | d('root' )}}"
mode: "{{ issue_cert_file_mode | d('0644') }}" mode: "{{ issue_cert_file_mode | d('0644') }}"
owner: "{{ issue_cert_file_owner | d('root') }}" owner: "{{ issue_cert_file_owner | d('root') }}"

View file

@@ -28,7 +28,7 @@
roles: roles:
- { role: kubespray-defaults} - { role: kubespray-defaults}
- { role: kubernetes/preinstall, tags: preinstall } - { role: kubernetes/preinstall, tags: preinstall }
- { role: docker, tags: docker } - { role: docker, tags: docker, when: manage_docker|default(true) }
- role: rkt - role: rkt
tags: rkt tags: rkt
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]" when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"

View file

@@ -26,6 +26,7 @@ data_files =
upgrade-cluster.yml upgrade-cluster.yml
scale.yml scale.yml
reset.yml reset.yml
remove-node.yml
extra_playbooks/upgrade-only-k8s.yml extra_playbooks/upgrade-only-k8s.yml
/usr/share/kubespray/roles = roles/* /usr/share/kubespray/roles = roles/*
/usr/share/doc/kubespray/ = /usr/share/doc/kubespray/ =

View file

@@ -1,4 +1,4 @@
INVENTORY=$(PWD)/../inventory/sample/hosts.ini INVENTORY=$(PWD)/../inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
$(HOME)/.ssh/id_rsa: $(HOME)/.ssh/id_rsa:
mkdir -p $(HOME)/.ssh mkdir -p $(HOME)/.ssh

View file

@@ -7,8 +7,6 @@ startup_script: ""
# Deployment settings # Deployment settings
kube_network_plugin: weave kube_network_plugin: weave
weave_cpu_limits: "100m"
weave_cpu_requests: "100m"
kubeadm_enabled: true kubeadm_enabled: true
deploy_netchecker: true deploy_netchecker: true
kubedns_min_replicas: 1 kubedns_min_replicas: 1

View file

@@ -16,7 +16,5 @@ deploy_netchecker: true
kubedns_min_replicas: 1 kubedns_min_replicas: 1
cloud_provider: gce cloud_provider: gce
kube_encrypt_secret_data: true kube_encrypt_secret_data: true
prometheus_operator_enabled: true
k8s_metrics_enabled: true
ingress_nginx_enabled: true ingress_nginx_enabled: true
cert_manager_enabled: true cert_manager_enabled: true

View file

@@ -7,8 +7,6 @@ startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
# Deployment settings # Deployment settings
kube_network_plugin: weave kube_network_plugin: weave
weave_cpu_limits: "100m"
weave_cpu_requests: "100m"
bootstrap_os: coreos bootstrap_os: coreos
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12 resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
deploy_netchecker: true deploy_netchecker: true

View file

@@ -0,0 +1,12 @@
# Instance settings
cloud_image_family: opensuse-leap
cloud_region: us-central1-c
mode: default
# Deployment settings
bootstrap_os: opensuse
kube_network_plugin: canal
kubeadm_enabled: true
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce

View file

@@ -5,8 +5,6 @@ mode: default
# Deployment settings # Deployment settings
kube_network_plugin: weave kube_network_plugin: weave
weave_cpu_limits: "100m"
weave_cpu_requests: "100m"
deploy_netchecker: true deploy_netchecker: true
kubedns_min_replicas: 1 kubedns_min_replicas: 1
cloud_provider: gce cloud_provider: gce

Some files were not shown because too many files have changed in this diff.