From 95e2bde15bd86b9607e98529b6e4eaf4d88642c4 Mon Sep 17 00:00:00 2001 From: Dann Bohn Date: Fri, 16 Feb 2018 16:20:08 -0500 Subject: [PATCH 01/33] set nodeName to "{{ inventory_hostname }}" in kubeadm-config --- roles/kubernetes/master/templates/kubeadm-config.yaml.j2 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 index ed1cc7add..dc842a5e6 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 @@ -83,3 +83,5 @@ apiServerCertSANs: {% endfor %} certificatesDir: {{ kube_config_dir }}/ssl unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}" +nodeName: {{ inventory_hostname }} +
From aa30fa8009e1b41176dbc1be3f0c99703171a7e6 Mon Sep 17 00:00:00 2001 From: gorazio Date: Tue, 20 Mar 2018 08:47:36 +0300 Subject: [PATCH 02/33] Add prometheus annotations to spec in ingress Added the annotations from metadata to spec.template.metadata. Without them, the pod does not get any annotations and Prometheus cannot discover it. --- .../ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 index 7fd3a946c..3a4c7860b 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 @@ -20,6 +20,9 @@ spec: labels: k8s-app: ingress-nginx version: v{{ ingress_nginx_controller_image_tag }} + annotations: + prometheus.io/port: '10254' + prometheus.io/scrape: 'true' spec: containers: - name: ingress-nginx-controller
From 96e46c4209003bfa61decf9c40eed670d6eed704 Mon Sep 17 00:00:00 2001 From: gorazio Date: Tue, 20 Mar 2018 10:23:50 +0300 Subject: [PATCH 03/33] bump after CLA signing --- .../ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 index 3a4c7860b..f8fac3b09 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 @@ -73,3 +73,4 @@ spec: {% if rbac_enabled %} serviceAccountName: ingress-nginx {% endif %} +
From 8b71ef8ceb46bcc93ad547f4ccfd452c53d40bee Mon Sep 17 00:00:00 2001 From: Erwan Miran Date: Wed, 21 Mar 2018 09:19:05 +0100 Subject: [PATCH 04/33] Labels from role (node-role.kubernetes.io/node) and labels from inventory are merged into the node-labels parameter in kubelet --- docs/vars.md | 2 ++ .../node/templates/kubelet.standard.env.j2 | 16 ++++++++++++---- tests/ansible.cfg | 1 + 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/docs/vars.md b/docs/vars.md index 5ea76b0e5..f4956c882 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -118,6 +118,8 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st * *kubelet_cgroup_driver* - Allows manual override of the cgroup-driver option for Kubelet.
By default autodetection is used to match Docker configuration. +* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter. + For example, labels can be set in the inventory as variables or more widely in group_vars ##### Custom flags for Kube Components For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. Example: diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2 index d33adfba7..05874a5de 100644 --- a/roles/kubernetes/node/templates/kubelet.standard.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2 @@ -81,16 +81,24 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% endif %} {# Kubelet node labels #} +{% set role_node_labels = [] %} {% if inventory_hostname in groups['kube-master'] %} -{% set node_labels %}--node-labels=node-role.kubernetes.io/master=true{% endset %} +{% do role_node_labels.append('node-role.kubernetes.io/master=true') %} {% if not standalone_kubelet|bool %} -{% set node_labels %}{{ node_labels }},node-role.kubernetes.io/node=true{% endset %} +{% do role_node_labels.append('node-role.kubernetes.io/node=true') %} {% endif %} {% else %} -{% set node_labels %}--node-labels=node-role.kubernetes.io/node=true{% endset %} +{% do role_node_labels.append('node-role.kubernetes.io/node=true') %} {% endif %} +{% set inventory_node_labels = [] %} +{% if node_labels is defined %} +{% for labelname, labelvalue in node_labels.iteritems() %} +{% do inventory_node_labels.append(labelname + '=' + labelvalue) %} +{% endfor %} +{% endif %} +{% set all_node_labels = role_node_labels + inventory_node_labels %} -KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} {{ node_labels }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}" +KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} --node-labels={{ all_node_labels | join(',') }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}" {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium"] %} KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %} diff --git a/tests/ansible.cfg b/tests/ansible.cfg index 9e734403e..9c4057529 100644 --- a/tests/ansible.cfg +++ b/tests/ansible.cfg @@ -10,3 +10,4 @@ fact_caching_connection = /tmp stdout_callback = skippy library = ./library:../library callback_whitelist = profile_tasks +jinja2_extensions = jinja2.ext.do From d3780e181ef8cec712b843f0db4a8997c226fe03 Mon Sep 17 00:00:00 2001 From: woopstar Date: Wed, 21 Mar 2018 23:27:13 +0100 Subject: [PATCH 05/33] Switch hyperkube from CoreOS to Google --- roles/download/defaults/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/download/defaults/main.yml 
b/roles/download/defaults/main.yml index a9c767dc0..10522d9ec 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -70,8 +70,8 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers" calico_policy_image_tag: "{{ calico_policy_version }}" calico_rr_image_repo: "quay.io/calico/routereflector" calico_rr_image_tag: "{{ calico_rr_version }}" -hyperkube_image_repo: "quay.io/coreos/hyperkube" -hyperkube_image_tag: "{{ kube_version }}_coreos.0" +hyperkube_image_repo: "gcr.io/google-containers/hyperkube" +hyperkube_image_tag: "v{{ kube_version }}" pod_infra_image_repo: "gcr.io/google_containers/pause-amd64" pod_infra_image_tag: "{{ pod_infra_version }}" install_socat_image_repo: "xueshanf/install-socat" From 405c711edb76392ca2cedff6ca392f95cd51bc8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kr=C3=BCger?= Date: Thu, 22 Mar 2018 09:07:28 +0100 Subject: [PATCH 06/33] Remove v in tag --- roles/download/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 10522d9ec..1cfdd3e93 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -71,7 +71,7 @@ calico_policy_image_tag: "{{ calico_policy_version }}" calico_rr_image_repo: "quay.io/calico/routereflector" calico_rr_image_tag: "{{ calico_rr_version }}" hyperkube_image_repo: "gcr.io/google-containers/hyperkube" -hyperkube_image_tag: "v{{ kube_version }}" +hyperkube_image_tag: "{{ kube_version }}" pod_infra_image_repo: "gcr.io/google_containers/pause-amd64" pod_infra_image_tag: "{{ pod_infra_version }}" install_socat_image_repo: "xueshanf/install-socat" From 9fa995ac9d595cc75695fb8b977ac2dd75328c46 Mon Sep 17 00:00:00 2001 From: Dann Bohn Date: Fri, 23 Mar 2018 08:33:25 -0400 Subject: [PATCH 07/33] only sets nodeName in kubeadm-config when kube_override_hostname is set --- roles/kubernetes/master/templates/kubeadm-config.yaml.j2 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 index dc842a5e6..b2d2cd2e7 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 @@ -83,5 +83,7 @@ apiServerCertSANs: {% endfor %} certificatesDir: {{ kube_config_dir }}/ssl unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}" +{% if kube_override_hostname|default('') %} nodeName: {{ inventory_hostname }} +{% endif %} From 1d0415a6cf5015373a35f2f50adc7749d1a014e0 Mon Sep 17 00:00:00 2001 From: Dann Bohn Date: Sat, 24 Mar 2018 13:29:07 -0400 Subject: [PATCH 08/33] fixes typo in kube_override_hostname for kubeadm --- roles/kubernetes/master/templates/kubeadm-config.yaml.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 index b2d2cd2e7..5ea5d712c 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 @@ -84,6 +84,6 @@ apiServerCertSANs: certificatesDir: {{ kube_config_dir }}/ssl unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}" {% if kube_override_hostname|default('') %} -nodeName: {{ inventory_hostname }} +nodeName: {{ kube_override_hostname }} {% endif %} From 5f5d0ffe14c0c8b5d919cb17fe6c0d22ab485a24 Mon Sep 17 00:00:00 2001 From: Erwan SEITE 
Date: Tue, 12 Dec 2017 11:47:04 +0100 Subject: [PATCH 09/33] replace sudo by become --- Vagrantfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Vagrantfile b/Vagrantfile index 9db4be3a1..64a3009c0 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -164,7 +164,7 @@ Vagrant.configure("2") do |config| if File.exist?(File.join(File.dirname($inventory), "hosts")) ansible.inventory_path = $inventory end - ansible.sudo = true + ansible.become = true ansible.limit = "all" ansible.host_key_checking = false ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"] From 31705a502d672b0cd917e78b2ff5573b96dd3161 Mon Sep 17 00:00:00 2001 From: Erwan SEITE Date: Wed, 20 Dec 2017 15:40:35 +0100 Subject: [PATCH 10/33] change vagrant version --- Vagrantfile | 8 +------- docs/vagrant.md | 2 +- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 64a3009c0..536bbff2b 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -3,7 +3,7 @@ require 'fileutils' -Vagrant.require_version ">= 1.9.0" +Vagrant.require_version ">= 2.0.0" CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb") @@ -135,12 +135,6 @@ Vagrant.configure("2") do |config| config.vm.network :private_network, ip: ip - # workaround for Vagrant 1.9.1 and centos vm - # https://github.com/hashicorp/vagrant/issues/8096 - if Vagrant::VERSION == "1.9.1" && $os == "centos" - config.vm.provision "shell", inline: "service network restart", run: "always" - end - # Disable swap for each vm config.vm.provision "shell", inline: "swapoff -a" diff --git a/docs/vagrant.md b/docs/vagrant.md index 042e8137b..de47159fa 100644 --- a/docs/vagrant.md +++ b/docs/vagrant.md @@ -1,7 +1,7 @@ Vagrant Install ================= -Assuming you have Vagrant (1.9+) installed with virtualbox (it may work +Assuming you have Vagrant (2.0+) installed with virtualbox (it may work with vmware, but is untested) you should be able to launch a 3 node Kubernetes cluster by simply running `$ vagrant up`.
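Note: Vagrant renamed the Ansible provisioner's `sudo` option to `become` (matching Ansible's own keyword), which is what the `ansible.become = true` change above tracks. A minimal sketch of the same privilege escalation expressed directly in an Ansible play (hypothetical snippet, for illustration only; the role name is just an example taken from this repo):

    # Play-level privilege escalation; equivalent to ansible.become = true
    # in the Vagrantfile's provisioner block.
    - hosts: all
      become: true
      roles:
        - kubespray-defaults  # example role; any role requiring root works the same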
From 076b5c153ffaecbdc20f588b5ed672fa3a3690cf Mon Sep 17 00:00:00 2001 From: avoidik Date: Tue, 27 Mar 2018 11:13:36 +0300 Subject: [PATCH 11/33] Return subnet_id as defined in kubespray.tf --- contrib/terraform/openstack/modules/network/outputs.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/terraform/openstack/modules/network/outputs.tf b/contrib/terraform/openstack/modules/network/outputs.tf index a426202b9..e56a792c2 100644 --- a/contrib/terraform/openstack/modules/network/outputs.tf +++ b/contrib/terraform/openstack/modules/network/outputs.tf @@ -2,6 +2,6 @@ output "router_id" { value = "${openstack_networking_router_interface_v2.k8s.id}" } -output "network_id" { +output "subnet_id" { value = "${openstack_networking_subnet_v2.k8s.id}" } From 72a42238849d00f1e40aee50ea11c4d628ddb272 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Wed, 28 Mar 2018 16:26:36 +0300 Subject: [PATCH 12/33] Write cloud-config during kubelet configuration This file should only be updated during kubelet upgrade so that master components are not accidentally restarted first during preinstall stage. --- roles/kubernetes/node/tasks/main.yml | 13 +++++++++++++ roles/kubernetes/preinstall/tasks/main.yml | 13 ------------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index 4d5fa5df5..78e6d92d6 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -134,6 +134,19 @@ tags: - kube-proxy +- name: Write cloud-config + template: + src: "{{ cloud_provider }}-cloud-config.j2" + dest: "{{ kube_config_dir }}/cloud_config" + group: "{{ kube_cert_group }}" + mode: 0640 + when: + - cloud_provider is defined + - cloud_provider in [ 'openstack', 'azure', 'vsphere' ] + notify: restart kubelet + tags: + - cloud-provider + # reload-systemd - meta: flush_handlers diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index f23040751..aca0c9606 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -256,19 +256,6 @@ tags: - bootstrap-os -- name: Write cloud-config - template: - src: "{{ cloud_provider }}-cloud-config.j2" - dest: "{{ kube_config_dir }}/cloud_config" - group: "{{ kube_cert_group }}" - mode: 0640 - when: - - inventory_hostname in groups['k8s-cluster'] - - cloud_provider is defined - - cloud_provider in [ 'openstack', 'azure', 'vsphere' ] - tags: - - cloud-provider - - import_tasks: etchosts.yml tags: - bootstrap-os From 0df32b03cadab6965322964b067c1e93eb2cb206 Mon Sep 17 00:00:00 2001 From: woopstar Date: Wed, 28 Mar 2018 17:42:12 +0200 Subject: [PATCH 13/33] Update openssl.conf to count better and work with Jinja 2.9 --- roles/etcd/templates/openssl.conf.j2 | 21 +++++---- .../secrets/templates/openssl.conf.j2 | 44 +++++++++++-------- 2 files changed, 36 insertions(+), 29 deletions(-) diff --git a/roles/etcd/templates/openssl.conf.j2 b/roles/etcd/templates/openssl.conf.j2 index 48327f0bf..2f4f7e262 100644 --- a/roles/etcd/templates/openssl.conf.j2 +++ b/roles/etcd/templates/openssl.conf.j2 @@ -1,4 +1,4 @@ -[req] +{% set counter = {'dns': 2,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req] req_extensions = v3_req distinguished_name = req_distinguished_name @@ -25,19 +25,18 @@ authorityKeyIdentifier=keyid:always,issuer [alt_names] DNS.1 = localhost {% for host in groups['etcd'] %} -DNS.{{ 
1 + loop.index }} = {{ host }} +DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }} {% endfor %} -{% if loadbalancer_apiserver is defined %} -{% set idx = groups['etcd'] | length | int + 2 %} -DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }} +{% if apiserver_loadbalancer_domain_name is defined %} +DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }} {% endif %} -{% set idx = groups['etcd'] | length | int + 3 %} {% for etcd_alt_name in etcd_cert_alt_names %} -DNS.{{ idx + 1 + loop.index }} = {{ etcd_alt_name }} +DNS.{{ counter["dns"] }} = {{ etcd_alt_name }}{{ increment(counter, 'dns') }} {% endfor %} {% for host in groups['etcd'] %} -IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} -IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} +{% if hostvars[host]['access_ip'] is defined %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }} +{% endif %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }} {% endfor %} -{% set idx = groups['etcd'] | length | int * 2 + 1 %} -IP.{{ idx }} = 127.0.0.1 +IP.{{ counter["ip"] }} = 127.0.0.1 diff --git a/roles/kubernetes/secrets/templates/openssl.conf.j2 b/roles/kubernetes/secrets/templates/openssl.conf.j2 index adc875ba6..579e2aad1 100644 --- a/roles/kubernetes/secrets/templates/openssl.conf.j2 +++ b/roles/kubernetes/secrets/templates/openssl.conf.j2 @@ -1,4 +1,4 @@ -[req] +{% set counter = {'dns': 6,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req] req_extensions = v3_req distinguished_name = req_distinguished_name [req_distinguished_name] @@ -13,31 +13,39 @@ DNS.3 = kubernetes.default.svc DNS.4 = kubernetes.default.svc.{{ dns_domain }} DNS.5 = localhost {% for host in groups['kube-master'] %} -DNS.{{ 5 + loop.index }} = {{ host }} +DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }} {% endfor %} -{% set idns = groups['kube-master'] | length | int + 5 %} -{% if loadbalancer_apiserver is defined %} -{% set idns = idns + 1 %} -DNS.{{ idns | string }} = {{ apiserver_loadbalancer_domain_name }} +{% for host in groups['kube-node'] %} +DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }} +{% endfor %} +{% if apiserver_loadbalancer_domain_name is defined %} +DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }} {% endif %} {% for host in groups['kube-master'] %} -IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} -IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} -{% endfor %} -{% set idx = groups['kube-master'] | length | int * 2 + 1 %} -IP.{{ idx }} = {{ kube_apiserver_ip }} -{% if loadbalancer_apiserver is defined %} -IP.{{ idx + 1 }} = {{ loadbalancer_apiserver.address }} -{% set idx = idx + 1 %} +{% if hostvars[host]['access_ip'] is defined %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }} +{% endif %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }} +{% endfor %} +{% for host in 
groups['kube-node'] %} +{% if hostvars[host]['access_ip'] is defined %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }} +{% endif %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }} +{% endfor %} +{% if kube_apiserver_ip is defined %} +IP.{{ counter["ip"] }} = {{ kube_apiserver_ip }}{{ increment(counter, 'ip') }} +{% endif %} +{% if loadbalancer_apiserver.address is defined %} +IP.{{ counter["ip"] }} = {{ loadbalancer_apiserver.address }}{{ increment(counter, 'ip') }} {% endif %} -IP.{{ idx + 1 }} = 127.0.0.1 {% if supplementary_addresses_in_ssl_keys is defined %} -{% set is = idx + 1 %} {% for addr in supplementary_addresses_in_ssl_keys %} {% if addr | ipaddr %} -IP.{{ is + loop.index }} = {{ addr }} +IP.{{ counter["ip"] }} = {{ addr }}{{ increment(counter, 'ip') }} {% else %} -DNS.{{ idns + loop.index }} = {{ addr }} +DNS.{{ counter["dns"] }} = {{ addr }}{{ increment(counter, 'dns') }} {% endif %} {% endfor %} {% endif %} +IP.{{ counter["ip"] }} = 127.0.0.1 From 0b5404b2b7561e268e10bf96796170a4a326658c Mon Sep 17 00:00:00 2001 From: woopstar Date: Wed, 28 Mar 2018 20:28:02 +0200 Subject: [PATCH 14/33] Fix --- roles/kubernetes/secrets/templates/openssl.conf.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/secrets/templates/openssl.conf.j2 b/roles/kubernetes/secrets/templates/openssl.conf.j2 index 579e2aad1..d9720c3fe 100644 --- a/roles/kubernetes/secrets/templates/openssl.conf.j2 +++ b/roles/kubernetes/secrets/templates/openssl.conf.j2 @@ -36,7 +36,7 @@ IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansib {% if kube_apiserver_ip is defined %} IP.{{ counter["ip"] }} = {{ kube_apiserver_ip }}{{ increment(counter, 'ip') }} {% endif %} -{% if loadbalancer_apiserver.address is defined %} +{% if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined %} IP.{{ counter["ip"] }} = {{ loadbalancer_apiserver.address }}{{ increment(counter, 'ip') }} {% endif %} {% if supplementary_addresses_in_ssl_keys is defined %} From ef7f5edbb3643dd23009c35e78e6efaae77f1f08 Mon Sep 17 00:00:00 2001 From: Chad Swenson Date: Wed, 28 Mar 2018 15:10:39 -0500 Subject: [PATCH 15/33] Remove old docker packages and other docker upgrade fixes (#2536) * Remove old docker packages This removes docker packages that are obsolete if docker-ce packages are to be installed, which fixes some package conflict issues that can occur during upgrades. 
* Add support for setting obsoletes=0 when installing docker with yum --- roles/docker/defaults/main.yml | 4 ++++ roles/docker/tasks/main.yml | 18 ++++++++++++++++++ roles/docker/tasks/pre-upgrade.yml | 20 ++++++++++++++++++++ roles/docker/vars/redhat.yml | 2 ++ 4 files changed, 44 insertions(+) create mode 100644 roles/docker/tasks/pre-upgrade.yml diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index aa10371f5..3ed3e9ce7 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -21,6 +21,10 @@ docker_dns_servers_strict: yes docker_container_storage_setup: false +# Used to override obsoletes=0 +yum_conf: /etc/yum.conf +docker_yum_conf: /etc/yum_docker.conf + # CentOS/RedHat docker-ce repo docker_rh_repo_base_url: 'https://download.docker.com/linux/centos/7/$basearch/stable' docker_rh_repo_gpgkey: 'https://download.docker.com/linux/centos/gpg' diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 80b917114..729397b44 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -30,6 +30,8 @@ tags: - facts +- import_tasks: pre-upgrade.yml + - name: ensure docker-ce repository public key is installed action: "{{ docker_repo_key_info.pkg_key }}" args: @@ -78,11 +80,27 @@ dest: "/etc/yum.repos.d/docker.repo" when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic +- name: Copy yum.conf for editing + copy: + src: "{{ yum_conf }}" + dest: "{{ docker_yum_conf }}" + remote_src: yes + when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic + +- name: Edit copy of yum.conf to set obsoletes=0 + lineinfile: + path: "{{ docker_yum_conf }}" + state: present + regexp: '^obsoletes=' + line: 'obsoletes=0' + when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic + - name: ensure docker packages are installed action: "{{ docker_package_info.pkg_mgr }}" args: pkg: "{{item.name}}" force: "{{item.force|default(omit)}}" + conf_file: "{{item.yum_conf|default(omit)}}" state: present register: docker_task_result until: docker_task_result|succeeded diff --git a/roles/docker/tasks/pre-upgrade.yml b/roles/docker/tasks/pre-upgrade.yml new file mode 100644 index 000000000..9315da305 --- /dev/null +++ b/roles/docker/tasks/pre-upgrade.yml @@ -0,0 +1,20 @@ +--- +- name: Ensure old versions of Docker are not installed. | Debian + package: + name: '{{ item }}' + state: absent + with_items: + - docker + - docker-engine + when: ansible_os_family == 'Debian' and (docker_versioned_pkg[docker_version | string] | search('docker-ce')) + +- name: Ensure old versions of Docker are not installed. 
| RedHat package: name: '{{ item }}' state: absent with_items: - docker - docker-common - docker-engine - docker-selinux when: ansible_os_family == 'RedHat' and (docker_versioned_pkg[docker_version | string] | search('docker-ce')) \ No newline at end of file diff --git a/roles/docker/vars/redhat.yml b/roles/docker/vars/redhat.yml index 39ba211d8..cd53e284c 100644 --- a/roles/docker/vars/redhat.yml +++ b/roles/docker/vars/redhat.yml @@ -28,7 +28,9 @@ docker_package_info: pkg_mgr: yum pkgs: - name: "{{ docker_selinux_versioned_pkg[docker_selinux_version | string] }}" + yum_conf: "{{ docker_yum_conf }}" - name: "{{ docker_versioned_pkg[docker_version | string] }}" + yum_conf: "{{ docker_yum_conf }}" docker_repo_key_info: pkg_key: ''
From 9ebbf1c3cdd0f192d12a2359ba681fdf59b259b4 Mon Sep 17 00:00:00 2001 From: Kuldip Madnani Date: Wed, 28 Mar 2018 16:24:11 -0500 Subject: [PATCH 16/33] Added a fix in the openssl.conf template to check whether the load balancer IP is defined. --- roles/kubernetes/secrets/templates/openssl.conf.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/secrets/templates/openssl.conf.j2 b/roles/kubernetes/secrets/templates/openssl.conf.j2 index adc875ba6..b02970d1e 100644 --- a/roles/kubernetes/secrets/templates/openssl.conf.j2 +++ b/roles/kubernetes/secrets/templates/openssl.conf.j2 @@ -26,7 +26,7 @@ IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} {% endfor %} {% set idx = groups['kube-master'] | length | int * 2 + 1 %} IP.{{ idx }} = {{ kube_apiserver_ip }} -{% if loadbalancer_apiserver is defined %} +{% if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined %} IP.{{ idx + 1 }} = {{ loadbalancer_apiserver.address }} {% set idx = idx + 1 %} {% endif %}
From c8f857eae430d42057b661dcbbb86843a0a0df10 Mon Sep 17 00:00:00 2001 From: georgejdli Date: Thu, 29 Mar 2018 09:35:28 -0500 Subject: [PATCH 17/33] configure kubespray to sign service account tokens with a dedicated and stable key --- .../templates/manifests/kube-apiserver.manifest.j2 | 2 +- .../manifests/kube-controller-manager.manifest.j2 | 2 +- roles/kubernetes/secrets/files/make-ssl.sh | 11 +++++++++++ roles/kubernetes/secrets/tasks/gen_certs_script.yml | 2 ++ 4 files changed, 15 insertions(+), 2 deletions(-) diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 350a27a18..a9cd1cc60 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -63,7 +63,7 @@ spec: {% if kube_token_auth|default(true) %} - --token-auth-file={{ kube_token_dir }}/known_tokens.csv {% endif %} - - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem + - --service-account-key-file={{ kube_cert_dir }}/service-account-key.pem {% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %} - --oidc-issuer-url={{ kube_oidc_url }} - --oidc-client-id={{ kube_oidc_client_id }} diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index 2b4282a2e..2ead625cb 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -29,7 +29,7 @@ spec: -
controller-manager - --kubeconfig={{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml - --leader-elect=true - - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem + - --service-account-private-key-file={{ kube_cert_dir }}/service-account-key.pem - --root-ca-file={{ kube_cert_dir }}/ca.pem - --cluster-signing-cert-file={{ kube_cert_dir }}/ca.pem - --cluster-signing-key-file={{ kube_cert_dir }}/ca-key.pem diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh index 724c6f369..1c34fc69d 100755 --- a/roles/kubernetes/secrets/files/make-ssl.sh +++ b/roles/kubernetes/secrets/files/make-ssl.sh @@ -82,6 +82,17 @@ gen_key_and_cert() { # Admins if [ -n "$MASTERS" ]; then + + # service-account + # If --service-account-private-key-file was previously configured to use apiserver-key.pem then copy that to the new dedicated service-account signing key location to avoid disruptions + if [ -e "$SSLDIR/apiserver-key.pem" ] && ! [ -e "$SSLDIR/service-account-key.pem" ]; then + cp $SSLDIR/apiserver-key.pem $SSLDIR/service-account-key.pem + fi + # Generate dedicated service account signing key if one doesn't exist + if ! [ -e "$SSLDIR/apiserver-key.pem" ] && ! [ -e "$SSLDIR/service-account-key.pem" ]; then + openssl genrsa -out service-account-key.pem 2048 > /dev/null 2>&1 + fi + # kube-apiserver # Generate only if we don't have existing ca and apiserver certs if ! [ -e "$SSLDIR/ca-key.pem" ] || ! [ -e "$SSLDIR/apiserver-key.pem" ]; then diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index 011575358..c39f606ad 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -75,6 +75,7 @@ 'kube-controller-manager-key.pem', 'front-proxy-client.pem', 'front-proxy-client-key.pem', + 'service-account-key.pem', {% for node in groups['kube-master'] %} 'admin-{{ node }}.pem', 'admin-{{ node }}-key.pem', @@ -86,6 +87,7 @@ 'apiserver-key.pem', 'front-proxy-client.pem', 'front-proxy-client-key.pem', + 'service-account-key.pem', 'kube-scheduler.pem', 'kube-scheduler-key.pem', 'kube-controller-manager.pem', From daeeae1a91aad8d633304f5961ee33df5ed813f1 Mon Sep 17 00:00:00 2001 From: Kuldip Madnani Date: Thu, 29 Mar 2018 11:37:32 -0500 Subject: [PATCH 18/33] Added retries in pre-upgrade.yml and retries while applying kube-dns.yml (#2553) * Added retries in pre-upgrade.yml and retries while applying kube-dns.yml * Removed trailing spaces --- roles/kubernetes-apps/ansible/tasks/main.yml | 4 ++++ roles/kubernetes/master/tasks/pre-upgrade.yml | 5 ++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 55d417982..c03a78722 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -50,6 +50,10 @@ - dns_mode != 'none' - inventory_hostname == groups['kube-master'][0] - not item|skipped + register: resource_result + until: resource_result|succeeded + retries: 4 + delay: 5 tags: - dnsmasq diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml index 3a9fe6417..56e57b015 100644 --- a/roles/kubernetes/master/tasks/pre-upgrade.yml +++ b/roles/kubernetes/master/tasks/pre-upgrade.yml @@ -30,4 +30,7 @@ with_items: - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] when: 
kube_apiserver_manifest_replaced.changed - run_once: true + register: remove_master_container + retries: 4 + until: remove_master_container.rc == 0 + delay: 5 \ No newline at end of file
From 4d85e3765e1c3aefdca224edf3b60e0b0e8e5ebb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=99=88=E5=AE=8F?= Date: Fri, 30 Mar 2018 09:19:00 +0800 Subject: [PATCH 19/33] remove redundant code --- roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index 7c8e0062d..57c2269a9 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -48,7 +48,6 @@ spec: {% elif kube_proxy_mode == 'ipvs' %} - --masquerade-all - --feature-gates=SupportIPVSProxyMode=true - - --proxy-mode=ipvs - --ipvs-min-sync-period=5s - --ipvs-sync-period=5s - --ipvs-scheduler=rr
From 4a705b3fbab6516952f1ec0775a6843994dc48e6 Mon Sep 17 00:00:00 2001 From: Chen Hong Date: Fri, 30 Mar 2018 16:42:08 +0800 Subject: [PATCH 20/33] Vault health check may need a delay --- roles/vault/tasks/cluster/systemd.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/vault/tasks/cluster/systemd.yml b/roles/vault/tasks/cluster/systemd.yml index 8df52f982..f7139d336 100644 --- a/roles/vault/tasks/cluster/systemd.yml +++ b/roles/vault/tasks/cluster/systemd.yml @@ -55,3 +55,4 @@ register: vault_health_check until: vault_health_check|succeeded retries: 10 + delay: "{{ retry_stagger | random + 3 }}"
From 004b0a3fcf47b601bfe2bf76c2a49b8144199858 Mon Sep 17 00:00:00 2001 From: woopstar Date: Fri, 30 Mar 2018 11:38:06 +0200 Subject: [PATCH 21/33] Fix merge conflict --- .../master/templates/kubeadm-config.yaml.j2 | 74 ++++++++++++++++++- 1 file changed, 70 insertions(+), 4 deletions(-) diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 index 393eaf99f..c2339d890 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 @@ -16,8 +16,11 @@ networking: serviceSubnet: {{ kube_service_addresses }} podSubnet: {{ kube_pods_subnet }} kubernetesVersion: {{ kube_version }} -{% if cloud_provider is defined and cloud_provider != "gce" %} -cloudProvider: {{ cloud_provider }} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} +cloud-provider: {{ cloud_provider }} +cloud-config: {{ kube_config_dir }}/cloud_config +{% elif cloud_provider is defined and cloud_provider == "aws" %} +cloud-provider: {{ cloud_provider }} {% endif %} {% if kube_proxy_mode == 'ipvs' %} kubeProxy: @@ -38,12 +41,24 @@ apiServerExtraArgs: apiserver-count: "{{ kube_apiserver_count }}" {% if kube_version | version_compare('v1.9', '>=') %} endpoint-reconciler-type: lease -{% endif %} +{% endif %} service-node-port-range: {{ kube_apiserver_node_port_range }} kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}" + profiling: "{{ kube_profiling }}" + enable-aggregator-routing: "{{ kube_api_aggregator_routing }}" + repair-malformed-updates: "false" +{% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %} + anonymous-auth: "{{ kube_api_anonymous_auth }}" +{% endif %} +{% if kube_feature_gates %} + feature-gates: {{ kube_feature_gates|join(',') }} +{% endif %} {% if
kube_basic_auth|default(true) %} basic-auth-file: {{ kube_users_dir }}/known_users.csv {% endif %} +{% if kube_token_auth|default(true) %} + token-auth-file: {{ kube_token_dir }}/known_tokens.csv +{% endif %} {% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %} oidc-issuer-url: {{ kube_oidc_url }} oidc-client-id: {{ kube_oidc_client_id }} @@ -72,6 +87,23 @@ controllerManagerExtraArgs: node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }} node-monitor-period: {{ kube_controller_node_monitor_period }} pod-eviction-timeout: {{ kube_controller_pod_eviction_timeout }} + enable-hostpath-provisioner: "{{ kube_hostpath_dynamic_provisioner }}" + profiling: "{{ kube_profiling }}" +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} + cloud-provider: {{cloud_provider}} + cloud-config: {{ kube_config_dir }}/cloud_config +{% elif cloud_provider is defined and cloud_provider in ["aws", "external"] %} + cloud-provider: {{cloud_provider}} +{% endif %} +{% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %} + configure-cloud-routes: "true" +{% endif %} +{% if kube_network_plugin is defined and kube_network_plugin in ["cloud", "flannel", "canal", "cilium"] %} + allocate-node-cidrs: true + cluster-cidr: {{ kube_pods_subnet }} + service-cluster-ip-range: {{ kube_service_addresses }} + node-cidr-mask-size: {{ kube_network_node_prefix }} +{% endif %} {% if kube_feature_gates %} feature-gates: {{ kube_feature_gates|join(',') }} {% endif %} @@ -80,6 +112,13 @@ controllerManagerExtraArgs: {% endfor %} {% if kube_kubeadm_scheduler_extra_args|length > 0 %} schedulerExtraArgs: +{% if volume_cross_zone_attachment %} + policy-config-file: {{ kube_config_dir }}/kube-scheduler-policy.yaml +{% endif %} + profiling: "{{ kube_profiling }}" +{% if kube_feature_gates %} + feature-gates: {{ kube_feature_gates|join(',') }} +{% endif %} {% for key in kube_kubeadm_scheduler_extra_args %} {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}" {% endfor %} @@ -93,4 +132,31 @@ unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}" {% if kube_override_hostname|default('') %} nodeName: {{ kube_override_hostname }} {% endif %} - +apiServerExtraVolumes: +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} + - name: cloud-config + hostPath: {{ kube_config_dir }} + mountPath: {{ kube_config_dir }} +{% endif %} +{% if kube_basic_auth|default(true) %} +- name: basic-auth-config + hostPath: {{ kube_users_dir }} + mountPath: {{ kube_users_dir }} +{% endif %} +{% if kube_token_auth|default(true) %} +- name: token-auth-config + hostPath: {{ kube_token_dir }} + mountPath: {{ kube_token_dir }} +{% endif %} +controllerManagerExtraVolumes: +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} + - name: cloud-config + hostPath: {{ kube_config_dir }} + mountPath: {{ kube_config_dir }} +{% endif %} +schedulerExtraVolumes: +{% if (cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"]) or volume_cross_zone_attachment %} + - name: cloud-config + hostPath: {{ kube_config_dir }} + mountPath: {{ kube_config_dir }} +{% endif %} From af5f376163af5f4f4bfe20efe04610c78f3e657b Mon Sep 17 00:00:00 2001 From: Andreas Kruger Date: Fri, 30 Mar 2018 11:42:20 +0200 Subject: [PATCH 22/33] Revert --- .../master/templates/kubeadm-config.yaml.j2 | 71 +------------------ 1 file changed, 2 insertions(+), 69 
deletions(-) diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 index c2339d890..0eccb4918 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 @@ -16,11 +16,8 @@ networking: serviceSubnet: {{ kube_service_addresses }} podSubnet: {{ kube_pods_subnet }} kubernetesVersion: {{ kube_version }} -{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} -cloud-provider: {{ cloud_provider }} -cloud-config: {{ kube_config_dir }}/cloud_config -{% elif cloud_provider is defined and cloud_provider == "aws" %} -cloud-provider: {{ cloud_provider }} +{% if cloud_provider is defined and cloud_provider != "gce" %} +cloudProvider: {{ cloud_provider }} {% endif %} {% if kube_proxy_mode == 'ipvs' %} kubeProxy: @@ -44,21 +41,9 @@ apiServerExtraArgs: {% endif %} service-node-port-range: {{ kube_apiserver_node_port_range }} kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}" - profiling: "{{ kube_profiling }}" - enable-aggregator-routing: "{{ kube_api_aggregator_routing }}" - repair-malformed-updates: "false" -{% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %} - anonymous-auth: "{{ kube_api_anonymous_auth }}" -{% endif %} -{% if kube_feature_gates %} - feature-gates: {{ kube_feature_gates|join(',') }} -{% endif %} {% if kube_basic_auth|default(true) %} basic-auth-file: {{ kube_users_dir }}/known_users.csv {% endif %} -{% if kube_token_auth|default(true) %} - token-auth-file: {{ kube_token_dir }}/known_tokens.csv -{% endif %} {% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %} oidc-issuer-url: {{ kube_oidc_url }} oidc-client-id: {{ kube_oidc_client_id }} @@ -87,23 +72,6 @@ controllerManagerExtraArgs: node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }} node-monitor-period: {{ kube_controller_node_monitor_period }} pod-eviction-timeout: {{ kube_controller_pod_eviction_timeout }} - enable-hostpath-provisioner: "{{ kube_hostpath_dynamic_provisioner }}" - profiling: "{{ kube_profiling }}" -{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} - cloud-provider: {{cloud_provider}} - cloud-config: {{ kube_config_dir }}/cloud_config -{% elif cloud_provider is defined and cloud_provider in ["aws", "external"] %} - cloud-provider: {{cloud_provider}} -{% endif %} -{% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %} - configure-cloud-routes: "true" -{% endif %} -{% if kube_network_plugin is defined and kube_network_plugin in ["cloud", "flannel", "canal", "cilium"] %} - allocate-node-cidrs: true - cluster-cidr: {{ kube_pods_subnet }} - service-cluster-ip-range: {{ kube_service_addresses }} - node-cidr-mask-size: {{ kube_network_node_prefix }} -{% endif %} {% if kube_feature_gates %} feature-gates: {{ kube_feature_gates|join(',') }} {% endif %} @@ -112,13 +80,6 @@ controllerManagerExtraArgs: {% endfor %} {% if kube_kubeadm_scheduler_extra_args|length > 0 %} schedulerExtraArgs: -{% if volume_cross_zone_attachment %} - policy-config-file: {{ kube_config_dir }}/kube-scheduler-policy.yaml -{% endif %} - profiling: "{{ kube_profiling }}" -{% if kube_feature_gates %} - feature-gates: {{ kube_feature_gates|join(',') }} -{% endif %} {% for key in kube_kubeadm_scheduler_extra_args %} {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}" {% endfor %} 
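With the hard-coded entries reverted, the scheduler flags above come solely from `kube_kubeadm_scheduler_extra_args`. A minimal sketch of how a flag flows through that loop (hypothetical group_vars value; the `address` flag is only an illustration):

    # group_vars (hypothetical):
    kube_kubeadm_scheduler_extra_args:
      address: "127.0.0.1"

    # which the template loop renders into kubeadm-config.yaml as:
    schedulerExtraArgs:
      address: "127.0.0.1"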
@@ -132,31 +93,3 @@ unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}" {% if kube_override_hostname|default('') %} nodeName: {{ kube_override_hostname }} {% endif %} -apiServerExtraVolumes: -{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} - - name: cloud-config - hostPath: {{ kube_config_dir }} - mountPath: {{ kube_config_dir }} -{% endif %} -{% if kube_basic_auth|default(true) %} -- name: basic-auth-config - hostPath: {{ kube_users_dir }} - mountPath: {{ kube_users_dir }} -{% endif %} -{% if kube_token_auth|default(true) %} -- name: token-auth-config - hostPath: {{ kube_token_dir }} - mountPath: {{ kube_token_dir }} -{% endif %} -controllerManagerExtraVolumes: -{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} - - name: cloud-config - hostPath: {{ kube_config_dir }} - mountPath: {{ kube_config_dir }} -{% endif %} -schedulerExtraVolumes: -{% if (cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"]) or volume_cross_zone_attachment %} - - name: cloud-config - hostPath: {{ kube_config_dir }} - mountPath: {{ kube_config_dir }} -{% endif %} From 03bcfa7ff544991da1a39af34a37ef644d27af91 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Fri, 30 Mar 2018 14:29:13 +0300 Subject: [PATCH 23/33] Stop templating kube-system namespace and creating it (#2545) Kubernetes makes this namespace automatically, so there is no need for kubespray to manage it. --- inventory/sample/group_vars/k8s-cluster.yml | 1 - roles/dnsmasq/tasks/main.yml | 2 +- .../templates/dnsmasq-clusterrolebinding.yml | 4 +-- roles/dnsmasq/templates/dnsmasq-deploy.yml | 2 +- .../templates/dnsmasq-serviceaccount.yml | 2 +- roles/dnsmasq/templates/dnsmasq-svc.yml | 2 +- roles/etcd/defaults/main.yml | 6 ++-- .../ansible/tasks/cleanup_dns.yml | 8 ++--- .../ansible/tasks/dashboard.yml | 2 +- roles/kubernetes-apps/ansible/tasks/main.yml | 2 +- .../coredns-clusterrolebinding.yml.j2 | 2 +- .../ansible/templates/coredns-config.yml.j2 | 2 +- .../templates/coredns-deployment.yml.j2 | 2 +- .../ansible/templates/coredns-sa.yml.j2 | 2 +- .../ansible/templates/coredns-svc.yml.j2 | 2 +- .../ansible/templates/dashboard.yml.j2 | 16 +++++----- .../kubedns-autoscaler-clusterrole.yml.j2 | 2 +- ...bedns-autoscaler-clusterrolebinding.yml.j2 | 4 +-- .../templates/kubedns-autoscaler-sa.yml.j2 | 2 +- .../templates/kubedns-autoscaler.yml.j2 | 4 +-- .../ansible/templates/kubedns-deploy.yml.j2 | 2 +- .../ansible/templates/kubedns-sa.yml.j2 | 2 +- .../ansible/templates/kubedns-svc.yml.j2 | 2 +- .../cluster_roles/tasks/main.yml | 29 ------------------- .../cluster_roles/templates/namespace.j2 | 2 +- .../efk/elasticsearch/tasks/main.yml | 6 ++-- .../templates/efk-clusterrolebinding.yml | 4 +-- .../efk/elasticsearch/templates/efk-sa.yml | 2 +- .../templates/elasticsearch-deployment.yml.j2 | 2 +- .../templates/elasticsearch-service.yml.j2 | 2 +- .../efk/fluentd/tasks/main.yml | 2 +- .../fluentd/templates/fluentd-config.yml.j2 | 2 +- .../efk/fluentd/templates/fluentd-ds.yml.j2 | 2 +- .../kubernetes-apps/efk/kibana/tasks/main.yml | 4 +-- .../kibana/templates/kibana-deployment.yml.j2 | 2 +- .../kibana/templates/kibana-service.yml.j2 | 2 +- .../cephfs_provisioner/defaults/main.yml | 2 +- .../defaults/main.yml | 2 +- roles/kubernetes-apps/helm/tasks/main.yml | 4 +-- .../templates/tiller-clusterrolebinding.yml | 4 +-- .../helm/templates/tiller-sa.yml | 2 +- .../network_plugin/calico/tasks/main.yml | 2 +- 
.../network_plugin/canal/tasks/main.yml | 2 +- .../network_plugin/cilium/tasks/main.yml | 4 +-- .../network_plugin/contiv/tasks/main.yml | 2 +- .../network_plugin/flannel/tasks/main.yml | 2 +- .../network_plugin/weave/tasks/main.yml | 2 +- .../policy_controller/calico/tasks/main.yml | 4 +-- .../templates/calico-kube-controllers.yml.j2 | 4 +-- .../calico/templates/calico-kube-cr.yml.j2 | 2 +- .../calico/templates/calico-kube-crb.yml.j2 | 2 +- .../calico/templates/calico-kube-sa.yml.j2 | 2 +- .../registry/defaults/main.yml | 2 +- .../rotate_tokens/tasks/main.yml | 2 +- .../manifests/kube-apiserver.manifest.j2 | 2 +- .../kube-controller-manager.manifest.j2 | 2 +- .../manifests/kube-scheduler.manifest.j2 | 2 +- roles/kubernetes/master/vars/main.yml | 6 ---- .../manifests/kube-proxy.manifest.j2 | 2 +- .../manifests/nginx-proxy.manifest.j2 | 2 +- roles/kubespray-defaults/defaults/main.yaml | 1 - .../calico/templates/calico-config.yml.j2 | 2 +- .../calico/templates/calico-cr.yml.j2 | 2 +- .../calico/templates/calico-crb.yml.j2 | 2 +- .../calico/templates/calico-node-sa.yml.j2 | 2 +- .../calico/templates/calico-node.yml.j2 | 2 +- .../canal/templates/canal-cr-calico.yml.j2 | 2 +- .../canal/templates/canal-crb-calico.yml.j2 | 2 +- .../canal/templates/canal-crb-flannel.yml.j2 | 2 +- .../canal/templates/canal-node-sa.yml.j2 | 2 +- .../canal/templates/canal-node.yaml.j2 | 2 +- .../cilium/templates/cilium-config.yml.j2 | 2 +- .../cilium/templates/cilium-crb.yml.j2 | 2 +- .../cilium/templates/cilium-ds.yml.j2 | 2 +- .../cilium/templates/cilium-sa.yml.j2 | 2 +- .../contiv/templates/contiv-api-proxy.yml.j2 | 4 +-- .../contiv/templates/contiv-config.yml.j2 | 2 +- .../contiv/templates/contiv-etcd-proxy.yml.j2 | 2 +- .../contiv/templates/contiv-etcd.yml.j2 | 2 +- .../contiv-netmaster-clusterrole.yml.j2 | 2 +- ...contiv-netmaster-clusterrolebinding.yml.j2 | 2 +- .../contiv-netmaster-serviceaccount.yml.j2 | 2 +- .../contiv/templates/contiv-netmaster.yml.j2 | 4 +-- .../contiv-netplugin-clusterrole.yml.j2 | 2 +- ...contiv-netplugin-clusterrolebinding.yml.j2 | 2 +- .../contiv-netplugin-serviceaccount.yml.j2 | 2 +- .../contiv/templates/contiv-netplugin.yml.j2 | 2 +- .../flannel/templates/cni-flannel-rbac.yml.j2 | 4 +-- .../flannel/templates/cni-flannel.yml.j2 | 4 +-- .../weave/templates/weave-net.yml.j2 | 16 +++++----- roles/vault/defaults/main.yml | 2 +- 91 files changed, 122 insertions(+), 159 deletions(-) delete mode 100644 roles/kubernetes/master/vars/main.yml diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index 5f4889e8b..694368954 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -6,7 +6,6 @@ kube_config_dir: /etc/kubernetes kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" kube_manifest_dir: "{{ kube_config_dir }}/manifests" -system_namespace: kube-system # This is where all the cert scripts and certs will be located kube_cert_dir: "{{ kube_config_dir }}/ssl" diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml index b6574fd27..831330175 100644 --- a/roles/dnsmasq/tasks/main.yml +++ b/roles/dnsmasq/tasks/main.yml @@ -91,7 +91,7 @@ - name: Start Resources kube: name: "{{item.item.name}}" - namespace: "{{system_namespace}}" + namespace: "kube-system" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" diff --git a/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml 
b/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml index 817de877b..0fa300989 100644 --- a/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml +++ b/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml @@ -3,11 +3,11 @@ kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: dnsmasq - namespace: "{{ system_namespace }}" + namespace: "kube-system" subjects: - kind: ServiceAccount name: dnsmasq - namespace: "{{ system_namespace}}" + namespace: "kube-system" roleRef: kind: ClusterRole name: cluster-admin diff --git a/roles/dnsmasq/templates/dnsmasq-deploy.yml b/roles/dnsmasq/templates/dnsmasq-deploy.yml index 838471050..0fb6045e8 100644 --- a/roles/dnsmasq/templates/dnsmasq-deploy.yml +++ b/roles/dnsmasq/templates/dnsmasq-deploy.yml @@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1 kind: Deployment metadata: name: dnsmasq - namespace: "{{system_namespace}}" + namespace: "kube-system" labels: k8s-app: dnsmasq kubernetes.io/cluster-service: "true" diff --git a/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml b/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml index bce8a232f..91e98feee 100644 --- a/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml +++ b/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml @@ -3,6 +3,6 @@ apiVersion: v1 kind: ServiceAccount metadata: name: dnsmasq - namespace: "{{ system_namespace }}" + namespace: "kube-system" labels: kubernetes.io/cluster-service: "true" diff --git a/roles/dnsmasq/templates/dnsmasq-svc.yml b/roles/dnsmasq/templates/dnsmasq-svc.yml index 54dc0aa97..f00d3d3dd 100644 --- a/roles/dnsmasq/templates/dnsmasq-svc.yml +++ b/roles/dnsmasq/templates/dnsmasq-svc.yml @@ -6,7 +6,7 @@ metadata: kubernetes.io/cluster-service: 'true' k8s-app: dnsmasq name: dnsmasq - namespace: {{system_namespace}} + namespace: kube-system spec: ports: - port: 53 diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml index 5f16db1d1..1268c13c7 100644 --- a/roles/etcd/defaults/main.yml +++ b/roles/etcd/defaults/main.yml @@ -12,9 +12,9 @@ etcd_cert_group: root # Note: This does not set up DNS entries. 
It simply adds the following DNS # entries to the certificate etcd_cert_alt_names: - - "etcd.{{ system_namespace }}.svc.{{ dns_domain }}" - - "etcd.{{ system_namespace }}.svc" - - "etcd.{{ system_namespace }}" + - "etcd.kube-system.svc.{{ dns_domain }}" + - "etcd.kube-system.svc" + - "etcd.kube-system" - "etcd" etcd_script_dir: "{{ bin_dir }}/etcd-scripts" diff --git a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml index 5f8356cf9..e77f1e799 100644 --- a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml +++ b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml @@ -2,7 +2,7 @@ - name: Kubernetes Apps | Delete old CoreDNS resources kube: name: "coredns" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{ bin_dir }}/kubectl" resource: "{{ item }}" state: absent @@ -16,7 +16,7 @@ - name: Kubernetes Apps | Delete kubeadm CoreDNS kube: name: "coredns" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{ bin_dir }}/kubectl" resource: "deploy" state: absent @@ -28,7 +28,7 @@ - name: Kubernetes Apps | Delete old KubeDNS resources kube: name: "kube-dns" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{ bin_dir }}/kubectl" resource: "{{ item }}" state: absent @@ -41,7 +41,7 @@ - name: Kubernetes Apps | Delete kubeadm KubeDNS kube: name: "kube-dns" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{ bin_dir }}/kubectl" resource: "{{ item }}" state: absent diff --git a/roles/kubernetes-apps/ansible/tasks/dashboard.yml b/roles/kubernetes-apps/ansible/tasks/dashboard.yml index ce56bd5d1..4c9ad5c74 100644 --- a/roles/kubernetes-apps/ansible/tasks/dashboard.yml +++ b/roles/kubernetes-apps/ansible/tasks/dashboard.yml @@ -22,7 +22,7 @@ - name: Kubernetes Apps | Start dashboard kube: name: "{{ item.item.name }}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{ bin_dir }}/kubectl" resource: "{{ item.item.type }}" filename: "{{ kube_config_dir }}/{{ item.item.file }}" diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index c03a78722..ceb667f69 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -37,7 +37,7 @@ - name: Kubernetes Apps | Start Resources kube: name: "{{ item.item.name }}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{ bin_dir }}/kubectl" resource: "{{ item.item.type }}" filename: "{{ kube_config_dir }}/{{ item.item.file }}" diff --git a/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 index 6c49d047f..89becd5b4 100644 --- a/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 @@ -15,4 +15,4 @@ roleRef: subjects: - kind: ServiceAccount name: coredns - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 index 983d2579f..360480c1e 100644 --- a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 @@ -3,7 +3,7 @@ apiVersion: v1 kind: ConfigMap metadata: name: coredns - namespace: {{ system_namespace }} + namespace: kube-system labels: 
addonmanager.kubernetes.io/mode: EnsureExists data: diff --git a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 index 30128d566..5cba6f1f0 100644 --- a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 @@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1 kind: Deployment metadata: name: coredns{{ coredns_ordinal_suffix | default('') }} - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: coredns{{ coredns_ordinal_suffix | default('') }} kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 index db5682354..64d9c4dae 100644 --- a/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 @@ -3,7 +3,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: coredns - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile diff --git a/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 index c5b76b0b5..193de10eb 100644 --- a/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 @@ -3,7 +3,7 @@ apiVersion: v1 kind: Service metadata: name: coredns{{ coredns_ordinal_suffix | default('') }} - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: coredns{{ coredns_ordinal_suffix | default('') }} kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 index b1ba1481d..5f0a40cb3 100644 --- a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 @@ -25,7 +25,7 @@ metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-certs - namespace: {{ system_namespace }} + namespace: kube-system type: Opaque --- @@ -37,7 +37,7 @@ metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard - namespace: {{ system_namespace }} + namespace: kube-system --- # ------------------- Dashboard Role & Role Binding ------------------- # @@ -46,7 +46,7 @@ kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: name: kubernetes-dashboard-minimal - namespace: {{ system_namespace }} + namespace: kube-system rules: # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret. 
- apiGroups: [""] @@ -81,7 +81,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: kubernetes-dashboard-minimal - namespace: {{ system_namespace }} + namespace: kube-system roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -89,7 +89,7 @@ roleRef: subjects: - kind: ServiceAccount name: kubernetes-dashboard - namespace: {{ system_namespace }} + namespace: kube-system --- # ------------------- Gross Hack For anonymous auth through api proxy ------------------- # @@ -103,7 +103,7 @@ rules: resources: ["services/proxy"] resourceNames: ["https:kubernetes-dashboard:"] verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] -- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/{{ system_namespace }}/services/https:kubernetes-dashboard:/proxy/*"] +- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/*"] verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] --- @@ -128,7 +128,7 @@ metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard - namespace: {{ system_namespace }} + namespace: kube-system spec: replicas: 1 revisionHistoryLimit: 10 @@ -200,7 +200,7 @@ metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard - namespace: {{ system_namespace }} + namespace: kube-system spec: ports: - port: 443 diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2 index f80d3d90c..e29ed4dac 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2 @@ -17,7 +17,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: cluster-proportional-autoscaler - namespace: {{ system_namespace }} + namespace: kube-system rules: - apiGroups: [""] resources: ["nodes"] diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2 index eb76f2d4e..3b11c6b9f 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2 @@ -17,11 +17,11 @@ kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: cluster-proportional-autoscaler - namespace: {{ system_namespace }} + namespace: kube-system subjects: - kind: ServiceAccount name: cluster-proportional-autoscaler - namespace: {{ system_namespace }} + namespace: kube-system roleRef: kind: ClusterRole name: cluster-proportional-autoscaler diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2 index 542ae86ce..4c440f653 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2 @@ -17,4 +17,4 @@ kind: ServiceAccount apiVersion: v1 metadata: name: cluster-proportional-autoscaler - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 index df92ee615..d7c30eceb 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 +++ 
b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 @@ -17,7 +17,7 @@ apiVersion: extensions/v1beta1 kind: Deployment metadata: name: kubedns-autoscaler - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: kubedns-autoscaler kubernetes.io/cluster-service: "true" @@ -40,7 +40,7 @@ spec: memory: "10Mi" command: - /cluster-proportional-autoscaler - - --namespace={{ system_namespace }} + - --namespace=kube-system - --configmap=kubedns-autoscaler # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base - --target=Deployment/kube-dns diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 index 682bdf491..cfce65f0e 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 @@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1 kind: Deployment metadata: name: kube-dns - namespace: "{{system_namespace}}" + namespace: kube-system labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2 index f399fd6f4..296a3a938 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2 @@ -3,6 +3,6 @@ apiVersion: v1 kind: ServiceAccount metadata: name: kube-dns - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2 index 1c4710db1..6bc5f9240 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2 @@ -3,7 +3,7 @@ apiVersion: v1 kind: Service metadata: name: kube-dns - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml index c576586a2..fefa7caeb 100644 --- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml +++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml @@ -126,32 +126,3 @@ - kube_version | version_compare('v1.9.3', '<=') - inventory_hostname == groups['kube-master'][0] tags: vsphere - -# This is not a cluster role, but should be run after kubeconfig is set on master -- name: Write kube system namespace manifest - template: - src: namespace.j2 - dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml" - when: inventory_hostname == groups['kube-master'][0] - tags: - - apps - -- name: Check if kube system namespace exists - command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}" - register: 'kubesystem' - changed_when: False - failed_when: False - when: inventory_hostname == groups['kube-master'][0] - tags: - - apps - -- name: Create kube system namespace - command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml" - retries: 4 - delay: "{{ retry_stagger | random + 3 }}" - register: create_system_ns - until: create_system_ns.rc == 0 - changed_when: False - when: inventory_hostname == groups['kube-master'][0] and kubesystem.rc != 0 - tags: - - apps diff --git a/roles/kubernetes-apps/cluster_roles/templates/namespace.j2 b/roles/kubernetes-apps/cluster_roles/templates/namespace.j2 index 
9bdf201a2..f2e115a6a 100644 --- a/roles/kubernetes-apps/cluster_roles/templates/namespace.j2 +++ b/roles/kubernetes-apps/cluster_roles/templates/namespace.j2 @@ -1,4 +1,4 @@ apiVersion: v1 kind: Namespace metadata: - name: "{{system_namespace}}" + name: "kube-system" diff --git a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml index 8abbe2317..b6055132b 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml +++ b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml @@ -10,7 +10,7 @@ when: rbac_enabled - name: "ElasticSearch | Create Serviceaccount and Clusterrolebinding (RBAC)" - command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}" + command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n kube-system" with_items: - "efk-sa.yml" - "efk-clusterrolebinding.yml" @@ -24,7 +24,7 @@ register: es_deployment_manifest - name: "ElasticSearch | Create ES deployment" - command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n {{ system_namespace }}" + command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n kube-system" run_once: true when: es_deployment_manifest.changed @@ -35,6 +35,6 @@ register: es_service_manifest - name: "ElasticSearch | Create ES service" - command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n {{ system_namespace }}" + command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n kube-system" run_once: true when: es_service_manifest.changed diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml index a5aba61ae..dd5b9b630 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml +++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml @@ -3,11 +3,11 @@ kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: efk - namespace: {{ system_namespace }} + namespace: kube-system subjects: - kind: ServiceAccount name: efk - namespace: {{ system_namespace }} + namespace: kube-system roleRef: kind: ClusterRole name: cluster-admin diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml index e79e26be8..75d75f650 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml +++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml @@ -3,6 +3,6 @@ apiVersion: v1 kind: ServiceAccount metadata: name: efk - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 index 6d5382e09..ee2eb8b21 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 +++ b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 @@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1 kind: Deployment metadata: name: elasticsearch-logging-v1 - namespace: "{{ system_namespace }}" + namespace: kube-system labels: k8s-app: elasticsearch-logging version: "{{ elasticsearch_image_tag }}" diff --git 
a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2 b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2 index b7558f9d9..789ecb215 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2 +++ b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2 @@ -3,7 +3,7 @@ apiVersion: v1 kind: Service metadata: name: elasticsearch-logging - namespace: "{{ system_namespace }}" + namespace: "kube-system" labels: k8s-app: elasticsearch-logging kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/efk/fluentd/tasks/main.yml b/roles/kubernetes-apps/efk/fluentd/tasks/main.yml index c91bf6827..f444c79b6 100644 --- a/roles/kubernetes-apps/efk/fluentd/tasks/main.yml +++ b/roles/kubernetes-apps/efk/fluentd/tasks/main.yml @@ -17,6 +17,6 @@ register: fluentd_ds_manifest - name: "Fluentd | Create fluentd daemonset" - command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n {{ system_namespace }}" + command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n kube-system" run_once: true when: fluentd_ds_manifest.changed diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2 index 8a8ebbcec..b7de44dc0 100644 --- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2 +++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: ConfigMap metadata: name: fluentd-config - namespace: "{{ system_namespace }}" + namespace: "kube-system" data: {{ fluentd_config_file }}: | # This configuration file for Fluentd / td-agent is used diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 index 960a79e89..f23a8851c 100644 --- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 +++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 @@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1 kind: DaemonSet metadata: name: "fluentd-es-v{{ fluentd_version }}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" labels: k8s-app: fluentd-es kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/efk/kibana/tasks/main.yml b/roles/kubernetes-apps/efk/kibana/tasks/main.yml index ea8568286..424b313b8 100644 --- a/roles/kubernetes-apps/efk/kibana/tasks/main.yml +++ b/roles/kubernetes-apps/efk/kibana/tasks/main.yml @@ -10,7 +10,7 @@ filename: "{{kube_config_dir}}/kibana-deployment.yaml" kubectl: "{{bin_dir}}/kubectl" name: "kibana-logging" - namespace: "{{system_namespace}}" + namespace: "kube-system" resource: "deployment" state: "latest" with_items: "{{ kibana_deployment_manifest.changed }}" @@ -27,7 +27,7 @@ filename: "{{kube_config_dir}}/kibana-service.yaml" kubectl: "{{bin_dir}}/kubectl" name: "kibana-logging" - namespace: "{{system_namespace}}" + namespace: "kube-system" resource: "svc" state: "latest" with_items: "{{ kibana_service_manifest.changed }}" diff --git a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2 b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2 index c48413bd0..4fdf54c04 100644 --- a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2 +++ b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2 @@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1 kind: Deployment metadata: name: kibana-logging - 
namespace: "{{ system_namespace }}" + namespace: "kube-system" labels: k8s-app: kibana-logging kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2 b/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2 index 241b896f0..5cff3c628 100644 --- a/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2 +++ b/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2 @@ -3,7 +3,7 @@ apiVersion: v1 kind: Service metadata: name: kibana-logging - namespace: "{{ system_namespace }}" + namespace: "kube-system" labels: k8s-app: kibana-logging kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml index 9a3bca1ef..3b80ecbb2 100644 --- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml +++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml @@ -2,7 +2,7 @@ cephfs_provisioner_image_repo: quay.io/kubespray/cephfs-provisioner cephfs_provisioner_image_tag: 92295a30 -cephfs_provisioner_namespace: "{{ system_namespace }}" +cephfs_provisioner_namespace: "kube-system" cephfs_provisioner_cluster: ceph cephfs_provisioner_monitors: [] cephfs_provisioner_admin_id: admin diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml index dd2e8a147..ea5dcb079 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml @@ -2,7 +2,7 @@ local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner local_volume_provisioner_image_tag: v2.0.0 -local_volume_provisioner_namespace: "{{ system_namespace }}" +local_volume_provisioner_namespace: "kube-system" local_volume_provisioner_base_dir: /mnt/disks local_volume_provisioner_mount_dir: /mnt/disks local_volume_provisioner_storage_class: local-storage diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index 06e97aff2..e7b387944 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -18,7 +18,7 @@ - name: Helm | Apply Helm Manifests (RBAC) kube: name: "{{item.item.name}}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" @@ -28,7 +28,7 @@ - name: Helm | Install/upgrade helm command: > - {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace={{ system_namespace }} + {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace=kube-system {% if helm_skip_refresh %} --skip-refresh{% endif %} {% if helm_stable_repo_url is defined %} --stable-repo-url {{ helm_stable_repo_url }}{% endif %} {% if rbac_enabled %} --service-account=tiller{% endif %} diff --git a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml index 0c8db4c78..00694181e 100644 --- a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml +++ b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml 
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: tiller - namespace: {{ system_namespace }} + namespace: kube-system subjects: - kind: ServiceAccount name: tiller - namespace: {{ system_namespace }} + namespace: kube-system roleRef: kind: ClusterRole name: cluster-admin diff --git a/roles/kubernetes-apps/helm/templates/tiller-sa.yml b/roles/kubernetes-apps/helm/templates/tiller-sa.yml index 26e575fb6..606dbb147 100644 --- a/roles/kubernetes-apps/helm/templates/tiller-sa.yml +++ b/roles/kubernetes-apps/helm/templates/tiller-sa.yml @@ -3,6 +3,6 @@ apiVersion: v1 kind: ServiceAccount metadata: name: tiller - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml index f17e45c7a..4c8295c1e 100644 --- a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml @@ -2,7 +2,7 @@ - name: Start Calico resources kube: name: "{{item.item.name}}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml index cbe4f0ac7..3640fe762 100644 --- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml @@ -2,7 +2,7 @@ - name: Canal | Start Resources kube: name: "{{item.item.name}}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" diff --git a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml index 2359fe2d4..5d90bdb01 100755 --- a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml @@ -2,7 +2,7 @@ - name: Cilium | Start Resources kube: name: "{{item.item.name}}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" @@ -11,7 +11,7 @@ when: inventory_hostname == groups['kube-master'][0] and not item|skipped - name: Cilium | Wait for pods to run - command: "{{bin_dir}}/kubectl -n {{system_namespace}} get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" + command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" register: pods_not_ready until: pods_not_ready.stdout.find("cilium")==-1 retries: 30 diff --git a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml index 330acc1cd..5289296dc 100644 --- a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml @@ -3,7 +3,7 @@ - name: Contiv | Create Kubernetes resources kube: name: "{{ item.item.name }}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{ bin_dir }}/kubectl" resource: "{{ item.item.type }}" filename: "{{ contiv_config_dir }}/{{ 
item.item.file }}" diff --git a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml index 09603a794..bdf954bf9 100644 --- a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml @@ -2,7 +2,7 @@ - name: Flannel | Start Resources kube: name: "{{item.item.name}}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" diff --git a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml index 66d900d55..53ad953b5 100644 --- a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml @@ -5,7 +5,7 @@ kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/weave-net.yml" resource: "ds" - namespace: "{{system_namespace}}" + namespace: "kube-system" state: "latest" when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml index ba1162799..62e929f41 100644 --- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml +++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml @@ -12,7 +12,7 @@ name: calico-policy-controller kubectl: "{{bin_dir}}/kubectl" resource: rs - namespace: "{{ system_namespace }}" + namespace: "kube-system" state: absent run_once: true @@ -32,7 +32,7 @@ - name: Start of Calico kube controllers kube: name: "{{item.item.name}}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 index 7e1311b92..d7083e3e6 100644 --- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 @@ -2,7 +2,7 @@ apiVersion: apps/v1beta2 kind: Deployment metadata: name: calico-kube-controllers - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: calico-kube-controllers kubernetes.io/cluster-service: "true" @@ -15,7 +15,7 @@ spec: template: metadata: name: calico-kube-controllers - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" k8s-app: calico-kube-controllers diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 index 82c2f3e44..d05e986a4 100644 --- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 @@ -3,7 +3,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: calico-kube-controllers - namespace: {{ system_namespace }} + namespace: kube-system rules: - apiGroups: - "" diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 index 38853a413..2e5118481 100644 --- 
a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 @@ -10,4 +10,4 @@ roleRef: subjects: - kind: ServiceAccount name: calico-kube-controllers - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 index bf8958976..e42e89d18 100644 --- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 @@ -3,6 +3,6 @@ apiVersion: v1 kind: ServiceAccount metadata: name: calico-kube-controllers - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/registry/defaults/main.yml b/roles/kubernetes-apps/registry/defaults/main.yml index 93d1cfa2a..a626435d5 100644 --- a/roles/kubernetes-apps/registry/defaults/main.yml +++ b/roles/kubernetes-apps/registry/defaults/main.yml @@ -4,6 +4,6 @@ registry_image_tag: 2.6 registry_proxy_image_repo: gcr.io/google_containers/kube-registry-proxy registry_proxy_image_tag: 0.4 -registry_namespace: "{{ system_namespace }}" +registry_namespace: "kube-system" registry_storage_class: "" registry_disk_size: "10Gi" diff --git a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml index 52101ae16..3884a3a65 100644 --- a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml +++ b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml @@ -44,5 +44,5 @@ when: needs_rotation - name: Rotate Tokens | Delete pods in system namespace - command: "{{ bin_dir }}/kubectl delete pods -n {{ system_namespace }} --all" + command: "{{ bin_dir }}/kubectl delete pods -n kube-system --all" when: needs_rotation diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 350a27a18..0a4e3e661 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-apiserver - namespace: {{system_namespace}} + namespace: kube-system labels: k8s-app: kube-apiserver kubespray: v2 diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index 2b4282a2e..99eef9b39 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-controller-manager - namespace: {{system_namespace}} + namespace: kube-system labels: k8s-app: kube-controller-manager annotations: diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 index b13fc7fa3..a4023365e 100644 --- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-scheduler - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: 
kube-scheduler annotations: diff --git a/roles/kubernetes/master/vars/main.yml b/roles/kubernetes/master/vars/main.yml deleted file mode 100644 index a5eba4f2b..000000000 --- a/roles/kubernetes/master/vars/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -namespace_kubesystem: - apiVersion: v1 - kind: Namespace - metadata: - name: "{{system_namespace}}" diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index 57c2269a9..18e51069f 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-proxy - namespace: {{system_namespace}} + namespace: kube-system labels: k8s-app: kube-proxy annotations: diff --git a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 index 2d566cad1..a1e9a7815 100644 --- a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: nginx-proxy - namespace: {{system_namespace}} + namespace: kube-system labels: k8s-app: kube-nginx spec: diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 4828de6af..f2c5dcd04 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -61,7 +61,6 @@ dns_domain: "{{ cluster_name }}" kube_config_dir: /etc/kubernetes kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" kube_manifest_dir: "{{ kube_config_dir }}/manifests" -system_namespace: kube-system # This is where all the cert scripts and certs will be located kube_cert_dir: "{{ kube_config_dir }}/ssl" diff --git a/roles/network_plugin/calico/templates/calico-config.yml.j2 b/roles/network_plugin/calico/templates/calico-config.yml.j2 index 92d2f1f0a..3be65deaa 100644 --- a/roles/network_plugin/calico/templates/calico-config.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-config.yml.j2 @@ -2,7 +2,7 @@ kind: ConfigMap apiVersion: v1 metadata: name: calico-config - namespace: {{ system_namespace }} + namespace: kube-system data: etcd_endpoints: "{{ etcd_access_addresses }}" etcd_ca: "/calico-secrets/ca_cert.crt" diff --git a/roles/network_plugin/calico/templates/calico-cr.yml.j2 b/roles/network_plugin/calico/templates/calico-cr.yml.j2 index 47d626659..cef8331f3 100644 --- a/roles/network_plugin/calico/templates/calico-cr.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-cr.yml.j2 @@ -3,7 +3,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: calico-node - namespace: {{ system_namespace }} + namespace: kube-system rules: - apiGroups: [""] resources: diff --git a/roles/network_plugin/calico/templates/calico-crb.yml.j2 b/roles/network_plugin/calico/templates/calico-crb.yml.j2 index 2e132a0dc..1b4e8fe00 100644 --- a/roles/network_plugin/calico/templates/calico-crb.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-crb.yml.j2 @@ -10,4 +10,4 @@ roleRef: subjects: - kind: ServiceAccount name: calico-node - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 index 5cce29793..68b1c286f 100644 --- 
a/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 @@ -3,6 +3,6 @@ apiVersion: v1 kind: ServiceAccount metadata: name: calico-node - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2 index 6ec3cd20b..849ea0afb 100644 --- a/roles/network_plugin/calico/templates/calico-node.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-node.yml.j2 @@ -6,7 +6,7 @@ kind: DaemonSet apiVersion: extensions/v1beta1 metadata: name: calico-node - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: calico-node spec: diff --git a/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 b/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 index e3b048c64..2e92b7b2b 100644 --- a/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 +++ b/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 @@ -3,7 +3,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: calico - namespace: {{ system_namespace }} + namespace: kube-system rules: - apiGroups: [""] resources: diff --git a/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 b/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 index e1c1f5050..016e5193e 100644 --- a/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 +++ b/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 @@ -11,4 +11,4 @@ roleRef: subjects: - kind: ServiceAccount name: canal - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 b/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 index 3b00017b1..097b1538e 100644 --- a/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 +++ b/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 @@ -11,4 +11,4 @@ roleRef: subjects: - kind: ServiceAccount name: canal - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 b/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 index d5b9a6e97..aa168d15c 100644 --- a/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 +++ b/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 @@ -3,7 +3,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: canal - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" diff --git a/roles/network_plugin/canal/templates/canal-node.yaml.j2 b/roles/network_plugin/canal/templates/canal-node.yaml.j2 index d63bf99b0..8535360a1 100644 --- a/roles/network_plugin/canal/templates/canal-node.yaml.j2 +++ b/roles/network_plugin/canal/templates/canal-node.yaml.j2 @@ -3,7 +3,7 @@ kind: DaemonSet apiVersion: extensions/v1beta1 metadata: name: canal-node - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: canal-node spec: diff --git a/roles/network_plugin/cilium/templates/cilium-config.yml.j2 b/roles/network_plugin/cilium/templates/cilium-config.yml.j2 index a96bb8531..c5051e2ca 100755 --- a/roles/network_plugin/cilium/templates/cilium-config.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium-config.yml.j2 @@ -2,7 +2,7 @@ kind: ConfigMap apiVersion: v1 metadata: name: cilium-config - namespace: {{ system_namespace }} + namespace: kube-system 
data: # This etcd-config contains the etcd endpoints of your cluster. If you use # TLS please make sure you uncomment the ca-file line and add the respective diff --git a/roles/network_plugin/cilium/templates/cilium-crb.yml.j2 b/roles/network_plugin/cilium/templates/cilium-crb.yml.j2 index dcfe4d471..04d603d57 100755 --- a/roles/network_plugin/cilium/templates/cilium-crb.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium-crb.yml.j2 @@ -10,6 +10,6 @@ roleRef: subjects: - kind: ServiceAccount name: cilium - namespace: {{ system_namespace }} + namespace: kube-system - kind: Group name: system:nodes diff --git a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 index 3d877a5cb..8eaa24f32 100755 --- a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 @@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1 kind: DaemonSet metadata: name: cilium - namespace: {{ system_namespace }} + namespace: kube-system spec: template: metadata: diff --git a/roles/network_plugin/cilium/templates/cilium-sa.yml.j2 b/roles/network_plugin/cilium/templates/cilium-sa.yml.j2 index d6ef2a431..c03ac59b4 100755 --- a/roles/network_plugin/cilium/templates/cilium-sa.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium-sa.yml.j2 @@ -3,4 +3,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: cilium - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 index 140379b13..3ccaffaf8 100644 --- a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 @@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1 kind: DaemonSet metadata: name: contiv-api-proxy - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: contiv-api-proxy spec: @@ -12,7 +12,7 @@ spec: template: metadata: name: contiv-api-proxy - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: contiv-api-proxy annotations: diff --git a/roles/network_plugin/contiv/templates/contiv-config.yml.j2 b/roles/network_plugin/contiv/templates/contiv-config.yml.j2 index 0505cd1f1..249d9d88e 100644 --- a/roles/network_plugin/contiv/templates/contiv-config.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-config.yml.j2 @@ -5,7 +5,7 @@ kind: ConfigMap apiVersion: v1 metadata: name: contiv-config - namespace: {{ system_namespace }} + namespace: kube-system data: # The location of your cluster store. This is set to the # avdertise-client value below from the contiv-etcd service. 
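The hunks in this patch all apply the same mechanical change: every occurrence of the `{{ system_namespace }}` template variable becomes the literal `kube-system`, and the variable itself is dropped from `roles/kubespray-defaults/defaults/main.yaml` further down. Anyone carrying custom manifests or templates on top of kubespray needs the same edit; a minimal sketch, using a hypothetical user-maintained add-on manifest:

```
# Before: renders only while system_namespace is still defined
metadata:
  name: my-addon                       # hypothetical add-on
  namespace: "{{ system_namespace }}"

# After: hardcode the namespace, as this patch does everywhere
metadata:
  name: my-addon
  namespace: kube-system
```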
diff --git a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 index a9690cc2f..75946d821 100644 --- a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 @@ -3,7 +3,7 @@ kind: DaemonSet apiVersion: extensions/v1beta1 metadata: name: contiv-etcd-proxy - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: contiv-etcd-proxy spec: diff --git a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 index 8060f4c01..a6e9121d4 100644 --- a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 @@ -3,7 +3,7 @@ kind: DaemonSet apiVersion: extensions/v1beta1 metadata: name: contiv-etcd - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: contiv-etcd spec: diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2 index 82ca00437..6ccd4f9b4 100644 --- a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2 @@ -2,7 +2,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: contiv-netmaster - namespace: {{ system_namespace }} + namespace: kube-system rules: - apiGroups: - "" diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2 index 74c5e3145..73d636775 100644 --- a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2 @@ -9,4 +9,4 @@ roleRef: subjects: - kind: ServiceAccount name: contiv-netmaster - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2 index 0c1bfb3e5..758ea4493 100644 --- a/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2 @@ -2,6 +2,6 @@ apiVersion: v1 kind: ServiceAccount metadata: name: contiv-netmaster - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 index 56be2d93d..d41259ec1 100644 --- a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 @@ -3,7 +3,7 @@ kind: DaemonSet apiVersion: extensions/v1beta1 metadata: name: contiv-netmaster - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: contiv-netmaster spec: @@ -12,7 +12,7 @@ spec: template: metadata: name: contiv-netmaster - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: contiv-netmaster annotations: diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2 index c26e094ed..af4c6e584 100644 --- 
a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2 @@ -2,7 +2,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: contiv-netplugin - namespace: {{ system_namespace }} + namespace: kube-system rules: - apiGroups: - "" diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2 index 0c989008a..6cac217fc 100644 --- a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2 @@ -9,4 +9,4 @@ roleRef: subjects: - kind: ServiceAccount name: contiv-netplugin - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2 index edfac8bb3..8d00ec8cb 100644 --- a/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2 @@ -2,6 +2,6 @@ apiVersion: v1 kind: ServiceAccount metadata: name: contiv-netplugin - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 index 9c2c0a036..2a7bf71cb 100644 --- a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 @@ -5,7 +5,7 @@ kind: DaemonSet apiVersion: extensions/v1beta1 metadata: name: contiv-netplugin - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: contiv-netplugin spec: diff --git a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 index aafe2a0f5..6f5c9a211 100644 --- a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 +++ b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 @@ -3,7 +3,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: flannel - namespace: "{{system_namespace}}" + namespace: "kube-system" --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 @@ -41,4 +41,4 @@ roleRef: subjects: - kind: ServiceAccount name: flannel - namespace: "{{system_namespace}}" \ No newline at end of file + namespace: "kube-system" \ No newline at end of file diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 index bb2a6a7f8..7ecb21ad0 100644 --- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 +++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 @@ -3,7 +3,7 @@ kind: ConfigMap apiVersion: v1 metadata: name: kube-flannel-cfg - namespace: "{{system_namespace}}" + namespace: "kube-system" labels: tier: node app: flannel @@ -41,7 +41,7 @@ apiVersion: extensions/v1beta1 kind: DaemonSet metadata: name: kube-flannel - namespace: "{{system_namespace}}" + namespace: "kube-system" labels: tier: node k8s-app: flannel diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2 index 699ba3128..9a7da7377 100644 --- a/roles/network_plugin/weave/templates/weave-net.yml.j2 +++ 
b/roles/network_plugin/weave/templates/weave-net.yml.j2 @@ -8,14 +8,14 @@ items: name: weave-net labels: name: weave-net - namespace: {{ system_namespace }} + namespace: kube-system - apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: name: weave-net labels: name: weave-net - namespace: {{ system_namespace }} + namespace: kube-system rules: - apiGroups: - '' @@ -41,7 +41,7 @@ items: name: weave-net labels: name: weave-net - namespace: {{ system_namespace }} + namespace: kube-system roleRef: kind: ClusterRole name: weave-net @@ -49,14 +49,14 @@ items: subjects: - kind: ServiceAccount name: weave-net - namespace: {{ system_namespace }} + namespace: kube-system - apiVersion: rbac.authorization.k8s.io/v1beta1 kind: Role metadata: name: weave-net labels: name: weave-net - namespace: {{ system_namespace }} + namespace: kube-system rules: - apiGroups: - '' @@ -79,7 +79,7 @@ items: name: weave-net labels: name: weave-net - namespace: {{ system_namespace }} + namespace: kube-system roleRef: kind: Role name: weave-net @@ -87,7 +87,7 @@ items: subjects: - kind: ServiceAccount name: weave-net - namespace: {{ system_namespace }} + namespace: kube-system - apiVersion: extensions/v1beta1 kind: DaemonSet metadata: @@ -95,7 +95,7 @@ items: labels: name: weave-net version: v{{ weave_version }} - namespace: {{ system_namespace }} + namespace: kube-system spec: minReadySeconds: 5 template: diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml index 9a3e83035..8e5ad08a0 100644 --- a/roles/vault/defaults/main.yml +++ b/roles/vault/defaults/main.yml @@ -86,7 +86,7 @@ vault_ca_options: format: pem ttl: "{{ vault_max_lease_ttl }}" exclude_cn_from_sans: true - alt_names: "vault.{{ system_namespace }}.svc.{{ dns_domain }},vault.{{ system_namespace }}.svc,vault.{{ system_namespace }},vault" + alt_names: "vault.kube-system.svc.{{ dns_domain }},vault.kube-system.svc,vault.kube-system,vault" etcd: common_name: etcd format: pem From 13c57147ebc8e0ae123ca0a979a12b9566aaaf1d Mon Sep 17 00:00:00 2001 From: Spencer Smith Date: Fri, 30 Mar 2018 09:48:55 -0400 Subject: [PATCH 24/33] only set no_proxy if other proxy vars are defined --- roles/kubespray-defaults/defaults/main.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 4828de6af..47ff298fe 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -241,6 +241,7 @@ weave_peers: uninitialized ## Set no_proxy to all assigned cluster IPs and hostnames no_proxy: >- + {%- if http_proxy is defined or https_proxy is defined %} {%- if loadbalancer_apiserver is defined -%} {{ apiserver_loadbalancer_domain_name| default('') }}, {{ loadbalancer_apiserver.address | default('') }}, @@ -254,11 +255,12 @@ no_proxy: >- {{ item }},{{ item }}.{{ dns_domain }}, {%- endfor -%} 127.0.0.1,localhost + {%- endif %} proxy_env: http_proxy: "{{ http_proxy| default ('') }}" https_proxy: "{{ https_proxy| default ('') }}" - no_proxy: "{{ no_proxy }}" + no_proxy: "{{ no_proxy| default ('') }}" # Vars for pointing to kubernetes api endpoints is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}" From 72c2a8982b01c09796214fc29866c4fccf281170 Mon Sep 17 00:00:00 2001 From: avoidik Date: Fri, 30 Mar 2018 17:24:50 +0300 Subject: [PATCH 25/33] Fix kubecert_node.results indexes --- roles/kubernetes/secrets/tasks/check-certs.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) 
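The index bump below follows from how Ansible stores loop output: `register` on a task with `with_items` produces a `results` list in the same order as the item list, so inserting new certificate names into that list shifts the position of every later entry. A minimal sketch of the mechanism, with hypothetical file names standing in for the real list in `check-certs.yml`:

```
# register + with_items: results[N] maps to the N-th item below
- name: "Check certs | check if certs already exist on node"
  stat:
    path: "{{ kube_cert_dir }}/{{ item }}"
  register: kubecert_node
  with_items:
    - ca.pem           # kubecert_node.results[0]
    - apiserver.pem    # results[1] (hypothetical entry)
    # entries added to the list ahead of the node certificate shift
    # it from results[10] to results[12] and its key from results[7]
    # to results[8], which is exactly what this patch corrects
```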
diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml index 627889771..4780b14d6 100644 --- a/roles/kubernetes/secrets/tasks/check-certs.yml +++ b/roles/kubernetes/secrets/tasks/check-certs.yml @@ -105,9 +105,9 @@ {%- set certs = {'sync': False} -%} {% if gen_node_certs[inventory_hostname] or (not kubecert_node.results[0].stat.exists|default(False)) or - (not kubecert_node.results[10].stat.exists|default(False)) or - (not kubecert_node.results[7].stat.exists|default(False)) or - (kubecert_node.results[10].stat.checksum|default('') != kubecert_master.files|selectattr("path", "equalto", kubecert_node.results[10].stat.path)|map(attribute="checksum")|first|default('')) -%} + (not kubecert_node.results[12].stat.exists|default(False)) or + (not kubecert_node.results[8].stat.exists|default(False)) or + (kubecert_node.results[12].stat.checksum|default('') != kubecert_master.files|selectattr("path", "equalto", kubecert_node.results[12].stat.path)|map(attribute="checksum")|first|default('')) -%} {%- set _ = certs.update({'sync': True}) -%} {% endif %} {{ certs.sync }} From e296ccb4d00db2de9bfeef3b7da91482e5e62e28 Mon Sep 17 00:00:00 2001 From: Spencer Smith Date: Fri, 30 Mar 2018 12:31:38 -0400 Subject: [PATCH 26/33] include do extension for jinja --- ansible.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible.cfg b/ansible.cfg index d3102a6f4..6f381690e 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -13,3 +13,4 @@ callback_whitelist = profile_tasks roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles deprecation_warnings=False inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds +jinja2_extensions = jinja2.ext.do From 572ab650dbd2f091e6d8a7c6321b1f04f1825222 Mon Sep 17 00:00:00 2001 From: georgejdli Date: Fri, 30 Mar 2018 13:00:01 -0500 Subject: [PATCH 27/33] copy dedicated service account token signing key for kubeadm migration --- roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml index a9f938318..58eaaa66f 100644 --- a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml +++ b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml @@ -9,4 +9,6 @@ - {src: apiserver-key.pem, dest: apiserver.key} - {src: ca.pem, dest: ca.crt} - {src: ca-key.pem, dest: ca.key} + - {src: service-account-key.pem, dest: sa.pub} + - {src: service-account-key.pem, dest: sa.key} register: kubeadm_copy_old_certs From 859a7f32fb361646a4ae1cfa037aad7ba957dc5a Mon Sep 17 00:00:00 2001 From: woopstar Date: Sat, 31 Mar 2018 00:06:32 +0200 Subject: [PATCH 28/33] Fix import task. 
Has to be an include task to evaluate the etcd_cluster_setup variable at run time
---
 roles/etcd/tasks/main.yml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index bb299126b..a64d9b097 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -29,13 +29,13 @@
   tags:
     - upgrade
 
-- import_tasks: set_cluster_health.yml
+- include_tasks: set_cluster_health.yml
   when: is_etcd_master and etcd_cluster_setup
 
-- import_tasks: configure.yml
+- include_tasks: configure.yml
   when: is_etcd_master and etcd_cluster_setup
 
-- import_tasks: refresh_config.yml
+- include_tasks: refresh_config.yml
   when: is_etcd_master and etcd_cluster_setup
 
 - name: Restart etcd if certs changed
@@ -68,8 +68,8 @@
 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`
 # state insted of `new`.
-- import_tasks: set_cluster_health.yml
+- include_tasks: set_cluster_health.yml
   when: is_etcd_master and etcd_cluster_setup
 
-- import_tasks: refresh_config.yml
+- include_tasks: refresh_config.yml
   when: is_etcd_master and etcd_cluster_setup

From 8ece922ef093ebf09d1b21a9ba99e79b054aee18 Mon Sep 17 00:00:00 2001
From: Erwan Miran
Date: Sat, 31 Mar 2018 00:30:42 +0200
Subject: [PATCH 29/33] node_labels documentation + kube-ingress label handling
 as role_node_label

---
 docs/vars.md                                            | 8 +++++++-
 roles/kubernetes/node/templates/kubelet.standard.env.j2 | 2 +-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/docs/vars.md b/docs/vars.md
index f4956c882..a4ae65678 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -119,7 +119,13 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st
 cgroup-driver option for Kubelet. By default autodetection is used to match Docker configuration.
 * *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
-  For example, labels can be set in the inventory as variables or more widely in group_vars
+  For example, labels can be set in the inventory as variables or more widely in group_vars.
+  *node_labels* must be defined as a dict:
+```
+node_labels:
+  label1_name: label1_value
+  label2_name: label2_value
+```
 
 ##### Custom flags for Kube Components
 For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags.
Example: diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2 index 50a5441e0..cd48fca9c 100644 --- a/roles/kubernetes/node/templates/kubelet.standard.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2 @@ -88,7 +88,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% do role_node_labels.append('node-role.kubernetes.io/node=true') %} {% endif %} {% elif inventory_hostname in groups['kube-ingress']|default([]) %} -{% set node_labels %}--node-labels=node-role.kubernetes.io/ingress=true{% endset %} +{% do role_node_labels.append('node-role.kubernetes.io/ingress=true') %} {% else %} {% do role_node_labels.append('node-role.kubernetes.io/node=true') %} {% endif %} From 26caad4f12836ebee1a1202f8bd25e3f5ed9b2b0 Mon Sep 17 00:00:00 2001 From: avoidik Date: Sat, 31 Mar 2018 02:38:01 +0300 Subject: [PATCH 30/33] Allow ansible_ssh_private_key_file for Openstack --- contrib/terraform/openstack/ansible_bastion_template.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/terraform/openstack/ansible_bastion_template.txt b/contrib/terraform/openstack/ansible_bastion_template.txt index cdf012066..a304b2c9d 100644 --- a/contrib/terraform/openstack/ansible_bastion_template.txt +++ b/contrib/terraform/openstack/ansible_bastion_template.txt @@ -1 +1 @@ -ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"' +ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'" From 2c89a02db3af9333a930d0c5b80b221afbdc5562 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kr=C3=BCger?= Date: Sat, 31 Mar 2018 04:40:01 +0200 Subject: [PATCH 31/33] Only download container/file if host is in defined group (#2565) * Only download container/file if host is in defined group * Set correct when clause * Fix last entries * Update download groups --- roles/download/defaults/main.yml | 70 ++++++++++++++++++++- roles/download/tasks/download_container.yml | 3 + roles/download/tasks/download_file.yml | 3 + roles/download/tasks/sync_container.yml | 9 +++ 4 files changed, 83 insertions(+), 2 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 74a3aaaf8..74594ead3 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -140,18 +140,24 @@ downloads: repo: "{{ netcheck_server_img_repo }}" tag: "{{ netcheck_server_tag }}" sha256: "{{ netcheck_server_digest_checksum|default(None) }}" + groups: + - k8s-cluster netcheck_agent: enabled: "{{ deploy_netchecker }}" container: true repo: "{{ netcheck_agent_img_repo }}" tag: "{{ netcheck_agent_tag }}" sha256: "{{ netcheck_agent_digest_checksum|default(None) }}" + groups: + - k8s-cluster etcd: enabled: true container: true repo: "{{ etcd_image_repo }}" tag: "{{ etcd_image_tag }}" sha256: "{{ etcd_digest_checksum|default(None) }}" + groups: + - etcd kubeadm: enabled: "{{ kubeadm_enabled }}" file: true @@ -163,6 +169,8 @@ downloads: unarchive: false owner: "root" mode: "0755" + groups: + - k8s-cluster istioctl: enabled: "{{ istio_enabled }}" file: true @@ -174,140 +182,186 @@ downloads: unarchive: false owner: "root" mode: "0755" + groups: + - kube-master hyperkube: enabled: true container: true repo: "{{ hyperkube_image_repo }}" tag: "{{ 
hyperkube_image_tag }}" sha256: "{{ hyperkube_digest_checksum|default(None) }}" + groups: + - k8s-cluster cilium: enabled: "{{ kube_network_plugin == 'cilium' }}" container: true repo: "{{ cilium_image_repo }}" tag: "{{ cilium_image_tag }}" sha256: "{{ cilium_digest_checksum|default(None) }}" + groups: + - k8s-cluster flannel: enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}" container: true repo: "{{ flannel_image_repo }}" tag: "{{ flannel_image_tag }}" sha256: "{{ flannel_digest_checksum|default(None) }}" + groups: + - k8s-cluster flannel_cni: enabled: "{{ kube_network_plugin == 'flannel' }}" container: true repo: "{{ flannel_cni_image_repo }}" tag: "{{ flannel_cni_image_tag }}" sha256: "{{ flannel_cni_digest_checksum|default(None) }}" + groups: + - k8s-cluster calicoctl: enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" container: true repo: "{{ calicoctl_image_repo }}" tag: "{{ calicoctl_image_tag }}" sha256: "{{ calicoctl_digest_checksum|default(None) }}" + groups: + - k8s-cluster calico_node: enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" container: true repo: "{{ calico_node_image_repo }}" tag: "{{ calico_node_image_tag }}" sha256: "{{ calico_node_digest_checksum|default(None) }}" + groups: + - k8s-cluster calico_cni: enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" container: true repo: "{{ calico_cni_image_repo }}" tag: "{{ calico_cni_image_tag }}" sha256: "{{ calico_cni_digest_checksum|default(None) }}" + groups: + - k8s-cluster calico_policy: enabled: "{{ enable_network_policy or kube_network_plugin == 'canal' }}" container: true repo: "{{ calico_policy_image_repo }}" tag: "{{ calico_policy_image_tag }}" sha256: "{{ calico_policy_digest_checksum|default(None) }}" + groups: + - k8s-cluster calico_rr: enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr and kube_network_plugin == 'calico' }}" container: true repo: "{{ calico_rr_image_repo }}" tag: "{{ calico_rr_image_tag }}" sha256: "{{ calico_rr_digest_checksum|default(None) }}" + groups: + - calico-rr weave_kube: enabled: "{{ kube_network_plugin == 'weave' }}" container: true repo: "{{ weave_kube_image_repo }}" tag: "{{ weave_kube_image_tag }}" sha256: "{{ weave_kube_digest_checksum|default(None) }}" + groups: + - k8s-cluster weave_npc: enabled: "{{ kube_network_plugin == 'weave' }}" container: true repo: "{{ weave_npc_image_repo }}" tag: "{{ weave_npc_image_tag }}" sha256: "{{ weave_npc_digest_checksum|default(None) }}" + groups: + - k8s-cluster contiv: enabled: "{{ kube_network_plugin == 'contiv' }}" container: true repo: "{{ contiv_image_repo }}" tag: "{{ contiv_image_tag }}" sha256: "{{ contiv_digest_checksum|default(None) }}" + groups: + - k8s-cluster contiv_auth_proxy: enabled: "{{ kube_network_plugin == 'contiv' }}" container: true repo: "{{ contiv_auth_proxy_image_repo }}" tag: "{{ contiv_auth_proxy_image_tag }}" sha256: "{{ contiv_auth_proxy_digest_checksum|default(None) }}" + groups: + - k8s-cluster pod_infra: enabled: true container: true repo: "{{ pod_infra_image_repo }}" tag: "{{ pod_infra_image_tag }}" sha256: "{{ pod_infra_digest_checksum|default(None) }}" + groups: + - k8s-cluster install_socat: enabled: "{{ ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] }}" container: true repo: "{{ install_socat_image_repo }}" tag: "{{ install_socat_image_tag }}" sha256: "{{ install_socat_digest_checksum|default(None) }}" + groups: + - k8s-cluster nginx: - enabled: 
true + enabled: "{{ loadbalancer_apiserver_localhost }}" container: true repo: "{{ nginx_image_repo }}" tag: "{{ nginx_image_tag }}" sha256: "{{ nginx_digest_checksum|default(None) }}" + groups: + - kube-node dnsmasq: enabled: "{{ dns_mode == 'dnsmasq_kubedns' }}" container: true repo: "{{ dnsmasq_image_repo }}" tag: "{{ dnsmasq_image_tag }}" sha256: "{{ dnsmasq_digest_checksum|default(None) }}" + groups: + - kube-node kubedns: enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}" container: true repo: "{{ kubedns_image_repo }}" tag: "{{ kubedns_image_tag }}" sha256: "{{ kubedns_digest_checksum|default(None) }}" + groups: + - kube-node coredns: enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}" container: true repo: "{{ coredns_image_repo }}" tag: "{{ coredns_image_tag }}" sha256: "{{ coredns_digest_checksum|default(None) }}" + groups: + - kube-node dnsmasq_nanny: enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}" container: true repo: "{{ dnsmasq_nanny_image_repo }}" tag: "{{ dnsmasq_nanny_image_tag }}" sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}" + groups: + - kube-node dnsmasq_sidecar: enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}" container: true repo: "{{ dnsmasq_sidecar_image_repo }}" tag: "{{ dnsmasq_sidecar_image_tag }}" sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}" + groups: + - kube-node kubednsautoscaler: enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}" container: true repo: "{{ kubednsautoscaler_image_repo }}" tag: "{{ kubednsautoscaler_image_tag }}" sha256: "{{ kubednsautoscaler_digest_checksum|default(None) }}" + groups: + - kube-node testbox: - enabled: true + enabled: false container: true repo: "{{ test_image_repo }}" tag: "{{ test_image_tag }}" @@ -318,30 +372,40 @@ downloads: repo: "{{ elasticsearch_image_repo }}" tag: "{{ elasticsearch_image_tag }}" sha256: "{{ elasticsearch_digest_checksum|default(None) }}" + groups: + - kube-node fluentd: enabled: "{{ efk_enabled }}" container: true repo: "{{ fluentd_image_repo }}" tag: "{{ fluentd_image_tag }}" sha256: "{{ fluentd_digest_checksum|default(None) }}" + groups: + - kube-node kibana: enabled: "{{ efk_enabled }}" container: true repo: "{{ kibana_image_repo }}" tag: "{{ kibana_image_tag }}" sha256: "{{ kibana_digest_checksum|default(None) }}" + groups: + - kube-node helm: enabled: "{{ helm_enabled }}" container: true repo: "{{ helm_image_repo }}" tag: "{{ helm_image_tag }}" sha256: "{{ helm_digest_checksum|default(None) }}" + groups: + - kube-node tiller: enabled: "{{ helm_enabled }}" container: true repo: "{{ tiller_image_repo }}" tag: "{{ tiller_image_tag }}" sha256: "{{ tiller_digest_checksum|default(None) }}" + groups: + - kube-node vault: enabled: "{{ cert_management == 'vault' }}" container: "{{ vault_deployment_type != 'host' }}" @@ -356,6 +420,8 @@ downloads: unarchive: true url: "{{ vault_download_url }}" version: "{{ vault_version }}" + groups: + - vault download_defaults: container: false diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml index bbf7cec85..a5659619c 100644 --- a/roles/download/tasks/download_container.yml +++ b/roles/download/tasks/download_container.yml @@ -7,6 +7,7 @@ when: - download.enabled - download.container + - group_names | intersect(download.groups) | length tags: - facts @@ -23,6 +24,7 @@ - download.enabled - download.container - pull_required|default(download_always_pull) + - group_names | intersect(download.groups) | length delegate_to: "{{ download_delegate 
}}" delegate_facts: yes run_once: yes @@ -38,3 +40,4 @@ - download.enabled - download.container - pull_required|default(download_always_pull) + - group_names | intersect(download.groups) | length diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml index 664fa4728..832fec41e 100644 --- a/roles/download/tasks/download_file.yml +++ b/roles/download/tasks/download_file.yml @@ -13,6 +13,7 @@ when: - download.enabled - download.file + - group_names | intersect(download.groups) | length - name: file_download | Download item get_url: @@ -28,6 +29,7 @@ when: - download.enabled - download.file + - group_names | intersect(download.groups) | length - name: file_download | Extract archives unarchive: @@ -40,3 +42,4 @@ - download.enabled - download.file - download.unarchive|default(False) + - group_names | intersect(download.groups) | length diff --git a/roles/download/tasks/sync_container.yml b/roles/download/tasks/sync_container.yml index a15f78cde..1ca84ad67 100644 --- a/roles/download/tasks/sync_container.yml +++ b/roles/download/tasks/sync_container.yml @@ -7,6 +7,7 @@ when: - download.enabled - download.container + - group_names | intersect(download.groups) | length tags: - facts @@ -17,6 +18,7 @@ - download.enabled - download.container - download_run_once + - group_names | intersect(download.groups) | length tags: - facts @@ -27,6 +29,7 @@ - download.enabled - download.container - download_run_once + - group_names | intersect(download.groups) | length - name: "container_download | Update the 'container_changed' fact" set_fact: @@ -36,6 +39,7 @@ - download.container - download_run_once - pull_required|default(download_always_pull) + - group_names | intersect(download.groups) | length run_once: "{{ download_run_once }}" tags: - facts @@ -53,6 +57,7 @@ - download.enabled - download.container - download_run_once + - group_names | intersect(download.groups) | length tags: - facts @@ -68,6 +73,7 @@ - download_run_once - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost") - (container_changed or not img.stat.exists) + - group_names | intersect(download.groups) | length - name: container_download | copy container images to ansible host synchronize: @@ -87,6 +93,7 @@ - inventory_hostname == download_delegate - download_delegate != "localhost" - saved.changed + - group_names | intersect(download.groups) | length - name: container_download | upload container images to nodes synchronize: @@ -108,6 +115,7 @@ - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname != download_delegate or download_delegate == "localhost") + - group_names | intersect(download.groups) | length tags: - upload - upgrade @@ -120,6 +128,7 @@ - download_run_once - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname != download_delegate or download_delegate == "localhost") + - group_names | intersect(download.groups) | length tags: - upload - upgrade From 195d6d791add01e0f9723b5ffcdfe551831f3d2b Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Wed, 28 Mar 2018 22:30:00 +0800 Subject: [PATCH 32/33] Integrate jetstack/cert-manager 0.2.3 to Kubespray --- inventory/sample/group_vars/k8s-cluster.yml | 4 ++ roles/download/defaults/main.yml | 22 +++++++- roles/etcd/defaults/main.yml | 4 +- .../ingress_controller/cert_manager/README.md | 17 +++++++ .../cert_manager/defaults/main.yml | 6 +++ .../cert_manager/tasks/main.yml | 38 ++++++++++++++ 
.../cert-manager-certificate-crd.yml.j2 | 21 ++++++++ .../cert-manager-clusterissuer-crd.yml.j2 | 17 +++++++ .../templates/cert-manager-clusterrole.yml.j2 | 25 +++++++++ .../cert-manager-clusterrolebinding.yml.j2 | 18 +++++++ .../templates/cert-manager-deploy.yml.j2 | 51 +++++++++++++++++++ .../templates/cert-manager-issuer-crd.yml.j2 | 17 +++++++ .../templates/cert-manager-ns.yml.j2 | 7 +++ .../templates/cert-manager-sa.yml.j2 | 11 ++++ .../ingress_controller/meta/main.yml | 7 +++ roles/kubernetes/master/defaults/main.yml | 3 +- roles/kubespray-defaults/defaults/main.yaml | 1 + roles/network_plugin/calico/defaults/main.yml | 2 +- tests/files/gce_centos7-flannel-addons.yml | 2 + 19 files changed, 268 insertions(+), 5 deletions(-) create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/README.md create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index 694368954..96a301f7d 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -207,6 +207,10 @@ ingress_nginx_enabled: false # ingress_nginx_configmap_udp_services: # 53: "kube-system/kube-dns:53" +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" + # Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now ) persistent_volumes_enabled: false diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 74594ead3..21b6bc72d 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -124,7 +124,6 @@ fluentd_image_tag: "{{ fluentd_version }}" kibana_version: "v4.6.1" kibana_image_repo: "gcr.io/google_containers/kibana" kibana_image_tag: "{{ kibana_version }}" - helm_version: "v2.8.1" helm_image_repo: "lachlanevenson/k8s-helm" helm_image_tag: "{{ helm_version }}" @@ -132,6 +131,11 @@ tiller_image_repo: "gcr.io/kubernetes-helm/tiller" tiller_image_tag: "{{ helm_version }}" vault_image_repo: "vault" vault_image_tag: "{{ vault_version }}" +cert_manager_version: "v0.2.3" +cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller" +cert_manager_controller_image_tag: "{{ cert_manager_version }}" +cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim" +cert_manager_ingress_shim_image_tag: "{{ cert_manager_version }}" downloads: netcheck_server: @@ -422,6 +426,22 @@ 
downloads: version: "{{ vault_version }}" groups: - vault + cert_manager_controller: + enabled: "{{ cert_manager_enabled }}" + container: true + repo: "{{ cert_manager_controller_image_repo }}" + tag: "{{ cert_manager_controller_image_tag }}" + sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}" + groups: + - kube-node + cert_manager_ingress_shim: + enabled: "{{ cert_manager_enabled }}" + container: true + repo: "{{ cert_manager_ingress_shim_image_repo }}" + tag: "{{ cert_manager_ingress_shim_image_tag }}" + sha256: "{{ cert_manager_ingress_shim_digest_checksum|default(None) }}" + groups: + - kube-node download_defaults: container: false diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml index 1268c13c7..6c13810c5 100644 --- a/roles/etcd/defaults/main.yml +++ b/roles/etcd/defaults/main.yml @@ -22,12 +22,12 @@ etcd_script_dir: "{{ bin_dir }}/etcd-scripts" etcd_heartbeat_interval: "250" etcd_election_timeout: "5000" -#etcd_snapshot_count: "10000" +# etcd_snapshot_count: "10000" # Parameters for ionice # -c takes an integer between 0 and 3 or one of the strings none, realtime, best-effort or idle. # -n takes an integer between 0 (highest priority) and 7 (lowest priority) -#etcd_ionice: "-c2 -n0" +# etcd_ionice: "-c2 -n0" etcd_metrics: "basic" diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/README.md b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md new file mode 100644 index 000000000..b0f008676 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md @@ -0,0 +1,17 @@ +Deployment files +================ + +This directory contains example deployment manifests for cert-manager that can +be used in place of the official Helm chart. + +This is useful if you are deploying cert-manager into an environment without +Helm, or want to inspect a 'bare minimum' deployment. + +Where do these come from? +------------------------- + +The manifests in these subdirectories are generated from the Helm chart +automatically. The `values.yaml` files used to configure cert-manager can be +found in [`hack/deploy`](../../hack/deploy/). + +They are automatically generated by running `./hack/update-deploy-gen.sh`. 
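The `groups` key attached to each download entry earlier in this series (and reused by the cert-manager images above) pairs with the `when` guards added to the download tasks: a host fetches an item only when its own `group_names` intersect the item's `groups`. A minimal sketch of the pattern, with a hypothetical `my_addon` entry and a placeholder image name:

```yaml
downloads:
  my_addon:                   # hypothetical entry, for illustration only
    enabled: true
    container: true
    repo: "example/my-addon"  # placeholder repository
    tag: "v1.0.0"
    groups:
      - kube-node             # only hosts in the kube-node group pull this image

# Matching guard from the download tasks: the intersection is non-empty
# only when the current host belongs to at least one of the item's groups.
# when:
#   - download.enabled
#   - download.container
#   - group_names | intersect(download.groups) | length
```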
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml new file mode 100644 index 000000000..bc6bceb15 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml @@ -0,0 +1,6 @@ +--- +cert_manager_namespace: "cert-manager" +cert_manager_cpu_requests: 10m +cert_manager_cpu_limits: 30m +cert_manager_memory_requests: 32Mi +cert_manager_memory_limits: 200Mi diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml new file mode 100644 index 000000000..eeb29da2d --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml @@ -0,0 +1,38 @@ +--- + +- name: Cert Manager | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/cert_manager" + state: directory + owner: root + group: root + mode: 0755 + +- name: Cert Manager | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/cert_manager/{{ item.file }}" + with_items: + - { name: cert-manager-ns, file: cert-manager-ns.yml, type: ns } + - { name: cert-manager-sa, file: cert-manager-sa.yml, type: sa } + - { name: cert-manager-clusterrole, file: cert-manager-clusterrole.yml, type: clusterrole } + - { name: cert-manager-clusterrolebinding, file: cert-manager-clusterrolebinding.yml, type: clusterrolebinding } + - { name: cert-manager-issuer-crd, file: cert-manager-issuer-crd.yml, type: crd } + - { name: cert-manager-clusterissuer-crd, file: cert-manager-clusterissuer-crd.yml, type: crd } + - { name: cert-manager-certificate-crd, file: cert-manager-certificate-crd.yml, type: crd } + - { name: cert-manager-deploy, file: cert-manager-deploy.yml, type: deploy } + register: cert_manager_manifests + when: + - inventory_hostname == groups['kube-master'][0] + +- name: Cert Manager | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ cert_manager_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/cert_manager/{{ item.item.file }}" + state: "latest" + with_items: "{{ cert_manager_manifests.results }}" + when: + - inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 new file mode 100644 index 000000000..48d0c5b49 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 @@ -0,0 +1,21 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: certificates.certmanager.k8s.io + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +spec: + group: certmanager.k8s.io + version: v1alpha1 + scope: Namespaced + names: + kind: Certificate + plural: certificates + shortNames: + - cert + - certs + diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 new file mode 100644 index 000000000..86601e098 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: 
apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterissuers.certmanager.k8s.io + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +spec: + group: certmanager.k8s.io + version: v1alpha1 + names: + kind: ClusterIssuer + plural: clusterissuers + scope: Cluster diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 new file mode 100644 index 000000000..9d36de5cb --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 @@ -0,0 +1,25 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cert-manager + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates", "issuers", "clusterissuers"] + verbs: ["*"] + - apiGroups: [""] + # TODO: remove endpoints once 0.4 is released. We include it here in case + # users use the 'master' version of the Helm chart with a 0.2.x release of + # cert-manager that still performs leader election with Endpoint resources. + # We advise users don't do this, but some will anyway and this will reduce + # friction. + resources: ["endpoints", "configmaps", "secrets", "events", "services", "pods"] + verbs: ["*"] + - apiGroups: ["extensions"] + resources: ["ingresses"] + verbs: ["*"] diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 new file mode 100644 index 000000000..d0e481c6c --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 @@ -0,0 +1,18 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 new file mode 100644 index 000000000..ef66bef05 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 @@ -0,0 +1,51 @@ +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: cert-manager + namespace: {{ cert_manager_namespace }} + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +spec: + replicas: 1 + template: + metadata: + labels: + k8s-app: cert-manager + release: cert-manager + annotations: + spec: + serviceAccountName: cert-manager + containers: + - name: cert-manager + image: {{ cert_manager_controller_image_repo }}:{{ cert_manager_controller_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --cluster-resource-namespace=$(POD_NAMESPACE) + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + requests: + cpu: {{ cert_manager_cpu_requests }} + memory: {{ 
cert_manager_memory_requests }} + limits: + cpu: {{ cert_manager_cpu_limits }} + memory: {{ cert_manager_memory_limits }} + + - name: ingress-shim + image: {{ cert_manager_ingress_shim_image_repo }}:{{ cert_manager_ingress_shim_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + requests: + cpu: {{ cert_manager_cpu_requests }} + memory: {{ cert_manager_memory_requests }} + limits: + cpu: {{ cert_manager_cpu_limits }} + memory: {{ cert_manager_memory_limits }} + diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 new file mode 100644 index 000000000..7e344d9f9 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: issuers.certmanager.k8s.io + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +spec: + group: certmanager.k8s.io + version: v1alpha1 + names: + kind: Issuer + plural: issuers + scope: Namespaced diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2 new file mode 100644 index 000000000..7cf3a282d --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ cert_manager_namespace }} + labels: + name: {{ cert_manager_namespace }} diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 new file mode 100644 index 000000000..ccdd5f430 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cert-manager + namespace: {{ cert_manager_namespace }} + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller diff --git a/roles/kubernetes-apps/ingress_controller/meta/main.yml b/roles/kubernetes-apps/ingress_controller/meta/main.yml index da2e03ecc..617e9d9a7 100644 --- a/roles/kubernetes-apps/ingress_controller/meta/main.yml +++ b/roles/kubernetes-apps/ingress_controller/meta/main.yml @@ -6,3 +6,10 @@ dependencies: - apps - ingress-nginx - ingress-controller + + - role: kubernetes-apps/ingress_controller/cert_manager + when: cert_manager_enabled + tags: + - apps + - cert-manager + - ingress-controller diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 303c1a88a..6325bb31c 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -96,4 +96,5 @@ volume_cross_zone_attachment: false ## Encrypting Secret Data at Rest kube_encrypt_secret_data: false kube_encrypt_token: "{{ lookup('password', inventory_dir + '/credentials/kube_encrypt_token.creds length=32 chars=ascii_letters,digits') }}" -kube_encryption_algorithm: "aescbc" # Must be either: aescbc, secretbox or aesgcm +# Must be either: aescbc, secretbox or aesgcm +kube_encryption_algorithm: "aescbc" diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml 
index b6f46eb5a..d6217d654 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -174,6 +174,7 @@ local_volume_provisioner_enabled: "{{ local_volumes_enabled | default('false') } persistent_volumes_enabled: false cephfs_provisioner_enabled: false ingress_nginx_enabled: false +cert_manager_enabled: false ## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) # openstack_blockstorage_version: "v1/v2/auto (default)" diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml index 1b0cd0421..857ebd11a 100644 --- a/roles/network_plugin/calico/defaults/main.yml +++ b/roles/network_plugin/calico/defaults/main.yml @@ -50,4 +50,4 @@ rbac_resources: # * can-reach=DESTINATION # * interface=INTERFACE-REGEX # see https://docs.projectcalico.org/v3.0/reference/node/configuration#ip-autodetection-methods -#calico_ip_auto_method: "interface=eth.*" +# calico_ip_auto_method: "interface=eth.*" diff --git a/tests/files/gce_centos7-flannel-addons.yml b/tests/files/gce_centos7-flannel-addons.yml index 8ac8a901b..9e2e1083f 100644 --- a/tests/files/gce_centos7-flannel-addons.yml +++ b/tests/files/gce_centos7-flannel-addons.yml @@ -16,3 +16,5 @@ deploy_netchecker: true kubedns_min_replicas: 1 cloud_provider: gce kube_encrypt_secret_data: true +ingress_nginx_enabled: true +cert_manager_enabled: true From b9b028a7350ff2bab9788f2999ceba79ddc5edb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kr=C3=BCger?= Date: Sat, 31 Mar 2018 20:06:09 +0200 Subject: [PATCH 33/33] Update etcd deployment to use correct cert and key (#2572) * Update etcd deployment to use correct cert and key * Update to use admin cert for etcdctl commands * Update handler to use admin cert too --- roles/etcd/handlers/backup.yml | 4 ++-- roles/etcd/tasks/configure.yml | 8 ++++---- roles/etcd/tasks/join_etcd-events_member.yml | 8 ++++---- roles/etcd/tasks/join_etcd_member.yml | 8 ++++---- roles/etcd/tasks/join_member.yml | 8 ++++---- roles/etcd/tasks/set_cluster_health.yml | 8 ++++---- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/roles/etcd/handlers/backup.yml b/roles/etcd/handlers/backup.yml index 247b2ae00..a0a80e108 100644 --- a/roles/etcd/handlers/backup.yml +++ b/roles/etcd/handlers/backup.yml @@ -48,7 +48,7 @@ snapshot save {{ etcd_backup_directory }}/snapshot.db environment: ETCDCTL_API: 3 - ETCDCTL_CERT: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" retries: 3 delay: "{{ retry_stagger | random + 3 }}" diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml index d7d3920c6..d39ba62d4 100644 --- a/roles/etcd/tasks/configure.yml +++ b/roles/etcd/tasks/configure.yml @@ -9,8 +9,8 @@ tags: - facts environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" - name: Configure | Check if member is in etcd-events cluster shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} 
member list | grep -q {{ etcd_access_address }}" @@ -22,8 +22,8 @@ tags: - facts environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" - name: Configure | Copy etcd.service systemd file template: diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml index 104ef22df..5a7061880 100644 --- a/roles/etcd/tasks/join_etcd-events_member.yml +++ b/roles/etcd/tasks/join_etcd-events_member.yml @@ -7,8 +7,8 @@ delay: "{{ retry_stagger | random + 3 }}" when: target_node == inventory_hostname environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" - include_tasks: refresh_config.yml vars: @@ -43,5 +43,5 @@ - facts when: target_node == inventory_hostname environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml index b7801f0c9..d11037151 100644 --- a/roles/etcd/tasks/join_etcd_member.yml +++ b/roles/etcd/tasks/join_etcd_member.yml @@ -7,8 +7,8 @@ delay: "{{ retry_stagger | random + 3 }}" when: target_node == inventory_hostname environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" - include_tasks: refresh_config.yml vars: @@ -43,5 +43,5 @@ - facts when: target_node == inventory_hostname environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" diff --git a/roles/etcd/tasks/join_member.yml b/roles/etcd/tasks/join_member.yml index b7801f0c9..d11037151 100644 --- a/roles/etcd/tasks/join_member.yml +++ b/roles/etcd/tasks/join_member.yml @@ -7,8 +7,8 @@ delay: "{{ retry_stagger | random + 3 }}" when: target_node == inventory_hostname environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" - include_tasks: refresh_config.yml vars: @@ -43,5 +43,5 @@ - facts when: target_node == inventory_hostname environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + 
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" diff --git a/roles/etcd/tasks/set_cluster_health.yml b/roles/etcd/tasks/set_cluster_health.yml index 68e738031..d0202943c 100644 --- a/roles/etcd/tasks/set_cluster_health.yml +++ b/roles/etcd/tasks/set_cluster_health.yml @@ -9,8 +9,8 @@ tags: - facts environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" - name: Configure | Check if etcd-events cluster is healthy shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'" @@ -22,5 +22,5 @@ tags: - facts environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
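With the switch to the admin keypair, any ad-hoc etcdctl (v2 API) call against a secured cluster follows the same shape as the patched tasks. A minimal task sketch, assuming `etcd_access_addresses` is defined analogously to the `etcd_events_access_addresses` variable used above:

```yaml
- name: Verify etcd cluster health with the admin cert
  shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} cluster-health"
  register: etcd_cluster_is_healthy
  changed_when: false
  environment:
    # etcdctl's v2 API reads the client certificate and key from these
    # variables, so pointing them at the admin keypair is sufficient.
    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
```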