From 4c280e59d43d237706dfb6279493d3289edb5da9 Mon Sep 17 00:00:00 2001 From: Miouge1 Date: Fri, 16 Feb 2018 13:43:35 +0100 Subject: [PATCH 01/82] Use legacy policy config to apply the scheduler policy --- .../master/templates/manifests/kube-scheduler.manifest.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 index b13fc7fa3..e42be474b 100644 --- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 @@ -29,6 +29,7 @@ spec: - --leader-elect=true - --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml {% if volume_cross_zone_attachment %} + - --use-legacy-policy-config - --policy-config-file={{ kube_config_dir }}/kube-scheduler-policy.yaml {% endif %} - --profiling=false From 14ac7d797b9573bc7a78a28e983080c1cf580ebb Mon Sep 17 00:00:00 2001 From: Andreas Holmsten Date: Mon, 19 Mar 2018 13:04:18 +0100 Subject: [PATCH 02/82] Rotate local-volume-provisioner token When tokens need to rotate, include local-volume-provisioner --- roles/kubernetes-apps/rotate_tokens/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml index 52101ae16..4abc7d730 100644 --- a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml +++ b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml @@ -34,7 +34,7 @@ {{ bin_dir }}/kubectl get secrets --all-namespaces -o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}' | grep kubernetes.io/service-account-token - | egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|efk|tiller' + | egrep 
'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|efk|tiller|local-volume-provisioner' register: tokens_to_delete when: needs_rotation From 3c12c6beb35e0f115e8eccdf0732b54614314a67 Mon Sep 17 00:00:00 2001 From: avoidik Date: Sat, 31 Mar 2018 02:59:59 +0300 Subject: [PATCH 03/82] Move cloud config configurations to proper location --- .../{preinstall => node}/templates/azure-cloud-config.j2 | 0 .../{preinstall => node}/templates/openstack-cloud-config.j2 | 0 .../{preinstall => node}/templates/vsphere-cloud-config.j2 | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename roles/kubernetes/{preinstall => node}/templates/azure-cloud-config.j2 (100%) rename roles/kubernetes/{preinstall => node}/templates/openstack-cloud-config.j2 (100%) rename roles/kubernetes/{preinstall => node}/templates/vsphere-cloud-config.j2 (100%) diff --git a/roles/kubernetes/preinstall/templates/azure-cloud-config.j2 b/roles/kubernetes/node/templates/azure-cloud-config.j2 similarity index 100% rename from roles/kubernetes/preinstall/templates/azure-cloud-config.j2 rename to roles/kubernetes/node/templates/azure-cloud-config.j2 diff --git a/roles/kubernetes/preinstall/templates/openstack-cloud-config.j2 b/roles/kubernetes/node/templates/openstack-cloud-config.j2 similarity index 100% rename from roles/kubernetes/preinstall/templates/openstack-cloud-config.j2 rename to roles/kubernetes/node/templates/openstack-cloud-config.j2 diff --git a/roles/kubernetes/preinstall/templates/vsphere-cloud-config.j2 b/roles/kubernetes/node/templates/vsphere-cloud-config.j2 similarity index 100% rename from roles/kubernetes/preinstall/templates/vsphere-cloud-config.j2 rename to roles/kubernetes/node/templates/vsphere-cloud-config.j2 From b6da596ec1e2b03ac326dc9f85926debb34de5bb Mon Sep 17 00:00:00 2001 From: avoidik Date: Sat, 31 Mar 2018 03:18:23 +0300 Subject: [PATCH 04/82] Move default configuration parameters for cloud-config --- 
roles/kubernetes/node/defaults/main.yml | 43 +++++++++++++++++++ roles/kubernetes/preinstall/defaults/main.yml | 29 ------------- 2 files changed, 43 insertions(+), 29 deletions(-) diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index 2cbf56e1d..52ca8d59d 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -92,3 +92,46 @@ kube_cadvisor_port: 0 # The read-only port for the Kubelet to serve on with no authentication/authorization. kube_read_only_port: 0 + + +# For the openstack integration kubelet will need credentials to access +# openstack apis like nova and cinder. Per default this values will be +# read from the environment. +openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" +openstack_username: "{{ lookup('env','OS_USERNAME') }}" +openstack_password: "{{ lookup('env','OS_PASSWORD') }}" +openstack_region: "{{ lookup('env','OS_REGION_NAME') }}" +openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true) }}" +openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" +openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" + +# For the vsphere integration, kubelet will need credentials to access +# vsphere apis +# Documentation regarding these values can be found +# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105 +vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}" +vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}" +vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}" +vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}" +vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}" +vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}" +vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}" +vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}" +vsphere_resource_pool: "{{ lookup('env', 
'VSPHERE_RESOURCE_POOL') }}" + +vsphere_scsi_controller_type: pvscsi +# vsphere_public_network is name of the network the VMs are joined to +vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}" + +## When azure is used, you need to also set the following variables. +## see docs/azure.md for details on how to get these values +#azure_tenant_id: +#azure_subscription_id: +#azure_aad_client_id: +#azure_aad_client_secret: +#azure_resource_group: +#azure_location: +#azure_subnet_name: +#azure_security_group_name: +#azure_vnet_name: +#azure_route_table_name: diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml index 295f10178..149cbb42a 100644 --- a/roles/kubernetes/preinstall/defaults/main.yml +++ b/roles/kubernetes/preinstall/defaults/main.yml @@ -23,35 +23,6 @@ disable_ipv6_dns: false kube_cert_group: kube-cert kube_config_dir: /etc/kubernetes -# For the openstack integration kubelet will need credentials to access -# openstack apis like nova and cinder. Per default this values will be -# read from the environment. 
-openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" -openstack_username: "{{ lookup('env','OS_USERNAME') }}" -openstack_password: "{{ lookup('env','OS_PASSWORD') }}" -openstack_region: "{{ lookup('env','OS_REGION_NAME') }}" -openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true) }}" -openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" -openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" - -# For the vsphere integration, kubelet will need credentials to access -# vsphere apis -# Documentation regarding these values can be found -# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105 -vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}" -vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}" -vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}" -vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}" -vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}" -vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}" -vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}" -vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}" -vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}" - -vsphere_scsi_controller_type: pvscsi -# vsphere_public_network is name of the network the VMs are joined to -vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}" - # Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content # for hostnet pods and infra needs resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf From ab8760cc83a6bb6f9e33723e6348fe30bbb358c8 Mon Sep 17 00:00:00 2001 From: avoidik Date: Sat, 31 Mar 2018 03:24:57 +0300 Subject: [PATCH 05/82] Move credentials pre-check --- roles/kubernetes/node/tasks/main.yml | 8 ++++++++ roles/kubernetes/preinstall/tasks/main.yml | 8 -------- 2 files changed, 8 insertions(+), 8 deletions(-) 
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index 78e6d92d6..defd3e9f7 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -134,6 +134,14 @@ tags: - kube-proxy +- include_tasks: "{{ cloud_provider }}-credential-check.yml" + when: + - cloud_provider is defined + - cloud_provider in [ 'openstack', 'azure', 'vsphere' ] + tags: + - cloud-provider + - facts + - name: Write cloud-config template: src: "{{ cloud_provider }}-cloud-config.j2" diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index aca0c9606..4b948831a 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -71,14 +71,6 @@ - cloud-provider - facts -- include_tasks: "{{ cloud_provider }}-credential-check.yml" - when: - - cloud_provider is defined - - cloud_provider in [ 'openstack', 'azure', 'vsphere' ] - tags: - - cloud-provider - - facts - - name: Create cni directories file: path: "{{ item }}" From 15efdf0c16724fa5389c35e27b01cb12ae1f3557 Mon Sep 17 00:00:00 2001 From: avoidik Date: Sat, 31 Mar 2018 03:26:37 +0300 Subject: [PATCH 06/82] Move credential checks --- .../tasks => node/templates}/azure-credential-check.yml | 0 .../tasks => node/templates}/openstack-credential-check.yml | 0 .../tasks => node/templates}/vsphere-credential-check.yml | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename roles/kubernetes/{preinstall/tasks => node/templates}/azure-credential-check.yml (100%) rename roles/kubernetes/{preinstall/tasks => node/templates}/openstack-credential-check.yml (100%) rename roles/kubernetes/{preinstall/tasks => node/templates}/vsphere-credential-check.yml (100%) diff --git a/roles/kubernetes/preinstall/tasks/azure-credential-check.yml b/roles/kubernetes/node/templates/azure-credential-check.yml similarity index 100% rename from roles/kubernetes/preinstall/tasks/azure-credential-check.yml rename to 
roles/kubernetes/node/templates/azure-credential-check.yml diff --git a/roles/kubernetes/preinstall/tasks/openstack-credential-check.yml b/roles/kubernetes/node/templates/openstack-credential-check.yml similarity index 100% rename from roles/kubernetes/preinstall/tasks/openstack-credential-check.yml rename to roles/kubernetes/node/templates/openstack-credential-check.yml diff --git a/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml b/roles/kubernetes/node/templates/vsphere-credential-check.yml similarity index 100% rename from roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml rename to roles/kubernetes/node/templates/vsphere-credential-check.yml From aa301c31d10643750da91600b7915601cb99aad1 Mon Sep 17 00:00:00 2001 From: avoidik Date: Sat, 31 Mar 2018 13:29:00 +0300 Subject: [PATCH 07/82] Move credential checks into proper folder --- .../node/{templates => tasks}/azure-credential-check.yml | 0 .../node/{templates => tasks}/openstack-credential-check.yml | 0 .../node/{templates => tasks}/vsphere-credential-check.yml | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename roles/kubernetes/node/{templates => tasks}/azure-credential-check.yml (100%) rename roles/kubernetes/node/{templates => tasks}/openstack-credential-check.yml (100%) rename roles/kubernetes/node/{templates => tasks}/vsphere-credential-check.yml (100%) diff --git a/roles/kubernetes/node/templates/azure-credential-check.yml b/roles/kubernetes/node/tasks/azure-credential-check.yml similarity index 100% rename from roles/kubernetes/node/templates/azure-credential-check.yml rename to roles/kubernetes/node/tasks/azure-credential-check.yml diff --git a/roles/kubernetes/node/templates/openstack-credential-check.yml b/roles/kubernetes/node/tasks/openstack-credential-check.yml similarity index 100% rename from roles/kubernetes/node/templates/openstack-credential-check.yml rename to roles/kubernetes/node/tasks/openstack-credential-check.yml diff --git 
a/roles/kubernetes/node/templates/vsphere-credential-check.yml b/roles/kubernetes/node/tasks/vsphere-credential-check.yml similarity index 100% rename from roles/kubernetes/node/templates/vsphere-credential-check.yml rename to roles/kubernetes/node/tasks/vsphere-credential-check.yml From b1a7889ff52412e7e7b03a7122bc84a39960c84d Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Sat, 31 Mar 2018 19:25:05 +0800 Subject: [PATCH 08/82] local-volume-provisioner: container download related things should defined in the download role --- roles/download/defaults/main.yml | 10 ++++++++++ .../local_volume_provisioner/defaults/main.yml | 3 --- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 725fc0bbd..48f4743b1 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -134,6 +134,8 @@ registry_image_repo: "registry" registry_image_tag: "2.6" registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy" registry_proxy_image_tag: "0.4" +local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner" +local_volume_provisioner_image_tag: "v2.0.0" cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner" cephfs_provisioner_image_tag: "92295a30" ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller" @@ -451,6 +453,14 @@ downloads: sha256: "{{ registry_proxy_digest_checksum|default(None) }}" groups: - kube-node + local_volume_provisioner: + enabled: "{{ local_volume_provisioner_enabled }}" + container: true + repo: "{{ local_volume_provisioner_image_repo }}" + tag: "{{ local_volume_provisioner_image_tag }}" + sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}" + groups: + - kube-node cephfs_provisioner: enabled: "{{ cephfs_provisioner_enabled }}" container: true diff --git 
a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml index ea5dcb079..4b18546d3 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml @@ -1,7 +1,4 @@ --- -local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner -local_volume_provisioner_image_tag: v2.0.0 - local_volume_provisioner_namespace: "kube-system" local_volume_provisioner_base_dir: /mnt/disks local_volume_provisioner_mount_dir: /mnt/disks From 3004791c6469181a83d80971110813a3cd3ce658 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Mon, 2 Apr 2018 11:19:23 +0300 Subject: [PATCH 09/82] Add pre-upgrade task for moving credentials file (#2394) * Add pre-upgrade task for moving credentials file This reverts commit 7ef9f4dfdd7d64876aacc48a982313dbea8a06f5. 
* add python interpreter workaround for localhost --- .gitignore | 1 + .gitlab-ci.yml | 2 -- roles/kubernetes/preinstall/tasks/main.yml | 5 ++++ .../preinstall/tasks/pre_upgrade.yml | 28 +++++++++++++++++++ 4 files changed, 34 insertions(+), 2 deletions(-) create mode 100644 roles/kubernetes/preinstall/tasks/pre_upgrade.yml diff --git a/.gitignore b/.gitignore index fcbcd1da1..8da099d42 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ .vagrant *.retry inventory/vagrant_ansible_inventory +inventory/credentials/ inventory/group_vars/fake_hosts.yml inventory/host_vars/ temp diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1014440ab..5af631476 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -109,7 +109,6 @@ before_script: ${SSH_ARGS} ${LOG_LEVEL} -e @${CI_TEST_VARS} - -e ansible_python_interpreter=${PYPATH} -e ansible_ssh_user=${SSH_USER} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" @@ -129,7 +128,6 @@ before_script: ${SSH_ARGS} ${LOG_LEVEL} -e @${CI_TEST_VARS} - -e ansible_python_interpreter=${PYPATH} -e ansible_ssh_user=${SSH_USER} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index aca0c9606..db7bfa00f 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -3,6 +3,11 @@ tags: - asserts +# This is run before bin_dir is pinned because these tasks are run on localhost +- import_tasks: pre_upgrade.yml + tags: + - upgrade + - name: Force binaries directory for Container Linux by CoreOS set_fact: bin_dir: "/opt/bin" diff --git a/roles/kubernetes/preinstall/tasks/pre_upgrade.yml b/roles/kubernetes/preinstall/tasks/pre_upgrade.yml new file mode 100644 index 000000000..63cbc9be1 --- /dev/null +++ b/roles/kubernetes/preinstall/tasks/pre_upgrade.yml @@ -0,0 +1,28 @@ +--- +- name: "Pre-upgrade | check if old credential dir exists" + local_action: + module: stat + path: "{{ 
inventory_dir }}/../credentials" + vars: + ansible_python_interpreter: "/usr/bin/env python" + register: old_credential_dir + become: no + +- name: "Pre-upgrade | check if new credential dir exists" + local_action: + module: stat + path: "{{ inventory_dir }}/credentials" + vars: + ansible_python_interpreter: "/usr/bin/env python" + register: new_credential_dir + become: no + when: old_credential_dir.stat.exists + +- name: "Pre-upgrade | move data from old credential dir to new" + local_action: command mv {{ inventory_dir }}/../credentials {{ inventory_dir }}/credentials + args: + creates: "{{ inventory_dir }}/credentials" + vars: + ansible_python_interpreter: "/usr/bin/env python" + become: no + when: old_credential_dir.stat.exists and not new_credential_dir.stat.exists From 4b98537f795353f61dacac51b956cc8c028c261a Mon Sep 17 00:00:00 2001 From: vterdunov Date: Mon, 2 Apr 2018 18:45:42 +0300 Subject: [PATCH 10/82] Properly check vsphere_cloud_provider.rc --- roles/kubernetes-apps/cluster_roles/tasks/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml index fefa7caeb..0511b7be5 100644 --- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml +++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml @@ -104,6 +104,7 @@ - rbac_enabled - cloud_provider is defined - cloud_provider == 'vsphere' + - vsphere_cloud_provider.rc is defined - vsphere_cloud_provider.rc != 0 - kube_version | version_compare('v1.9.0', '>=') - kube_version | version_compare('v1.9.3', '<=') @@ -121,6 +122,7 @@ - rbac_enabled - cloud_provider is defined - cloud_provider == 'vsphere' + - vsphere_cloud_provider.rc is defined - vsphere_cloud_provider.rc != 0 - kube_version | version_compare('v1.9.0', '>=') - kube_version | version_compare('v1.9.3', '<=') From 76bb5f8d756e335f1b06e1b385f9868c2581f817 Mon Sep 17 00:00:00 2001 From: georgejdli Date: Mon, 2 Apr 2018 10:57:24 -0500 Subject: [PATCH 
11/82] check if dedicated service account token signing key exists --- roles/kubernetes/secrets/tasks/check-certs.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml index 4780b14d6..07820edf7 100644 --- a/roles/kubernetes/secrets/tasks/check-certs.yml +++ b/roles/kubernetes/secrets/tasks/check-certs.yml @@ -50,6 +50,7 @@ '{{ kube_cert_dir }}/kube-controller-manager-key.pem', '{{ kube_cert_dir }}/front-proxy-client.pem', '{{ kube_cert_dir }}/front-proxy-client-key.pem', + '{{ kube_cert_dir }}/service-account-key.pem', {% for host in groups['kube-master'] %} '{{ kube_cert_dir }}/admin-{{ host }}.pem' '{{ kube_cert_dir }}/admin-{{ host }}-key.pem' @@ -71,7 +72,8 @@ {% for cert in ['apiserver.pem', 'apiserver-key.pem', 'kube-scheduler.pem','kube-scheduler-key.pem', 'kube-controller-manager.pem','kube-controller-manager-key.pem', - 'front-proxy-client.pem','front-proxy-client-key.pem'] -%} + 'front-proxy-client.pem','front-proxy-client-key.pem', + 'service-account-key.pem'] -%} {% set cert_file = "%s/%s.pem"|format(kube_cert_dir, cert) %} {% if not cert_file in existing_certs -%} {%- set gen = True -%} From 32f4194cf8d19a0a70395db4118ef16274ae9dff Mon Sep 17 00:00:00 2001 From: Xiaoxi He Date: Tue, 3 Apr 2018 10:39:17 +0800 Subject: [PATCH 12/82] Bump ingress-nginx-controller to version 0.12.0 --- roles/download/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 48f4743b1..24be1685d 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -139,7 +139,7 @@ local_volume_provisioner_image_tag: "v2.0.0" cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner" cephfs_provisioner_image_tag: "92295a30" ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller" 
-ingress_nginx_controller_image_tag: "0.11.0" +ingress_nginx_controller_image_tag: "0.12.0" ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend" ingress_nginx_default_backend_image_tag: "1.4" cert_manager_version: "v0.2.3" From 428a554ddb4230abc267c17a8b7c1769345f0eb0 Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Tue, 3 Apr 2018 14:29:50 +0800 Subject: [PATCH 13/82] istio: container download related things should defined in the download role --- roles/download/defaults/main.yml | 80 +++++++++++++++++++ roles/kubernetes-apps/istio/defaults/main.yml | 30 ------- 2 files changed, 80 insertions(+), 30 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 48f4743b1..bc36a0fdc 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -70,6 +70,22 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers" calico_policy_image_tag: "{{ calico_policy_version }}" calico_rr_image_repo: "quay.io/calico/routereflector" calico_rr_image_tag: "{{ calico_rr_version }}" +istio_proxy_image_repo: docker.io/istio/proxy +istio_proxy_image_tag: "{{ istio_version }}" +istio_proxy_init_image_repo: docker.io/istio/proxy_init +istio_proxy_init_image_tag: "{{ istio_version }}" +istio_ca_image_repo: docker.io/istio/istio-ca +istio_ca_image_tag: "{{ istio_version }}" +istio_mixer_image_repo: docker.io/istio/mixer +istio_mixer_image_tag: "{{ istio_version }}" +istio_pilot_image_repo: docker.io/istio/pilot +istio_pilot_image_tag: "{{ istio_version }}" +istio_proxy_debug_image_repo: docker.io/istio/proxy_debug +istio_proxy_debug_image_tag: "{{ istio_version }}" +istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer +istio_sidecar_initializer_image_tag: "{{ istio_version }}" +istio_statsd_image_repo: prom/statsd-exporter +istio_statsd_image_tag: latest hyperkube_image_repo: "gcr.io/google-containers/hyperkube" hyperkube_image_tag: "{{ kube_version }}" 
pod_infra_image_repo: "gcr.io/google_containers/pause-amd64" @@ -199,6 +215,70 @@ downloads: mode: "0755" groups: - kube-master + istio_proxy: + enabled: "{{ istio_enabled }}" + container: true + repo: "{{ istio_proxy_image_repo }}" + tag: "{{ istio_proxy_image_tag }}" + sha256: "{{ istio_proxy_digest_checksum|default(None) }}" + groups: + - kube-node + istio_proxy_init: + enabled: "{{ istio_enabled }}" + container: true + repo: "{{ istio_proxy_init_image_repo }}" + tag: "{{ istio_proxy_init_image_tag }}" + sha256: "{{ istio_proxy_init_digest_checksum|default(None) }}" + groups: + - kube-node + istio_ca: + enabled: "{{ istio_enabled }}" + container: true + repo: "{{ istio_ca_image_repo }}" + tag: "{{ istio_ca_image_tag }}" + sha256: "{{ istio_ca_digest_checksum|default(None) }}" + groups: + - kube-node + istio_mixer: + enabled: "{{ istio_enabled }}" + container: true + repo: "{{ istio_mixer_image_repo }}" + tag: "{{ istio_mixer_image_tag }}" + sha256: "{{ istio_mixer_digest_checksum|default(None) }}" + groups: + - kube-node + istio_pilot: + enabled: "{{ istio_enabled }}" + container: true + repo: "{{ istio_pilot_image_repo }}" + tag: "{{ istio_pilot_image_tag }}" + sha256: "{{ istio_pilot_digest_checksum|default(None) }}" + groups: + - kube-node + istio_proxy_debug: + enabled: "{{ istio_enabled }}" + container: true + repo: "{{ istio_proxy_debug_image_repo }}" + tag: "{{ istio_proxy_debug_image_tag }}" + sha256: "{{ istio_proxy_debug_digest_checksum|default(None) }}" + groups: + - kube-node + istio_sidecar_initializer: + enabled: "{{ istio_enabled }}" + container: true + repo: "{{ istio_sidecar_initializer_image_repo }}" + tag: "{{ istio_sidecar_initializer_image_tag }}" + sha256: "{{ istio_sidecar_initializer_digest_checksum|default(None) }}" + groups: + - kube-node + istio_statsd: + enabled: "{{ istio_enabled }}" + container: true + repo: "{{ istio_statsd_image_repo }}" + tag: "{{ istio_statsd_image_tag }}" + sha256: "{{ istio_statsd_digest_checksum|default(None) 
}}" + groups: + - kube-node hyperkube: enabled: true container: true diff --git a/roles/kubernetes-apps/istio/defaults/main.yml b/roles/kubernetes-apps/istio/defaults/main.yml index dc51ea7d6..6124ce42e 100644 --- a/roles/kubernetes-apps/istio/defaults/main.yml +++ b/roles/kubernetes-apps/istio/defaults/main.yml @@ -1,32 +1,2 @@ --- -istio_enabled: false - istio_namespace: istio-system -istio_version: "0.2.6" - -istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux" -istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370 - -istio_proxy_image_repo: docker.io/istio/proxy -istio_proxy_image_tag: "{{ istio_version }}" - -istio_proxy_init_image_repo: docker.io/istio/proxy_init -istio_proxy_init_image_tag: "{{ istio_version }}" - -istio_ca_image_repo: docker.io/istio/istio-ca -istio_ca_image_tag: "{{ istio_version }}" - -istio_mixer_image_repo: docker.io/istio/mixer -istio_mixer_image_tag: "{{ istio_version }}" - -istio_pilot_image_repo: docker.io/istio/pilot -istio_pilot_image_tag: "{{ istio_version }}" - -istio_proxy_debug_image_repo: docker.io/istio/proxy_debug -istio_proxy_debug_image_tag: "{{ istio_version }}" - -istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer -istio_sidecar_initializer_image_tag: "{{ istio_version }}" - -istio_statsd_image_repo: prom/statsd-exporter -istio_statsd_image_tag: latest From b54e0918865d9d10db4aa2fcaf6b0fb4ad5a623d Mon Sep 17 00:00:00 2001 From: Chen Hong Date: Wed, 4 Apr 2018 18:18:51 +0800 Subject: [PATCH 14/82] Persist ip_vs modules --- roles/kubernetes/node/tasks/main.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index defd3e9f7..dd2885a97 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -118,6 +118,14 @@ tags: - kube-proxy +- name: Persist ip_vs modules + copy: + dest: 
/etc/modules-load.d/kube_proxy-ipvs.conf + content: "ip_vs\nip_vs_rr\nip_vs_wrr\nip_vs_sh\nnf_conntrack_ipv4" + when: kube_proxy_mode == 'ipvs' + tags: + - kube-proxy + - name: Write proxy manifest template: src: manifests/kube-proxy.manifest.j2 From 973e7372b452fd9cc9141dccff6f2f9ffc1ecac5 Mon Sep 17 00:00:00 2001 From: Chen Hong Date: Wed, 4 Apr 2018 23:05:27 +0800 Subject: [PATCH 15/82] content: | --- roles/kubernetes/node/tasks/main.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index dd2885a97..13cc0740d 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -121,7 +121,12 @@ - name: Persist ip_vs modules copy: dest: /etc/modules-load.d/kube_proxy-ipvs.conf - content: "ip_vs\nip_vs_rr\nip_vs_wrr\nip_vs_sh\nnf_conntrack_ipv4" + content: | + ip_vs + ip_vs_rr + ip_vs_wrr + ip_vs_sh + nf_conntrack_ipv4 when: kube_proxy_mode == 'ipvs' tags: - kube-proxy From f26e16bf7931cd0546395458a6829a3909ef2fa1 Mon Sep 17 00:00:00 2001 From: Shravan Papanaidu Date: Wed, 4 Apr 2018 13:26:16 -0700 Subject: [PATCH 16/82] kubectl get pods from 'test' namespace as the pods were created in 'test' ns --- tests/testcases/030_check-network.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml index 2fa78545f..531b84c06 100644 --- a/tests/testcases/030_check-network.yml +++ b/tests/testcases/030_check-network.yml @@ -14,7 +14,7 @@ - name: Wait for pods to be ready - shell: "{{bin_dir}}/kubectl get pods" + shell: "{{bin_dir}}/kubectl get pods -n test" register: pods until: - '"ContainerCreating" not in pods.stdout' @@ -25,18 +25,18 @@ no_log: true - name: Get pod names - shell: "{{bin_dir}}/kubectl get pods -o json" + shell: "{{bin_dir}}/kubectl get pods -n test -o json" register: pods no_log: true - name: Get hostnet pods - command: 
"{{bin_dir}}/kubectl get pods -o + command: "{{bin_dir}}/kubectl get pods -n test -o jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'" register: hostnet_pods no_log: true - name: Get running pods - command: "{{bin_dir}}/kubectl get pods -o + command: "{{bin_dir}}/kubectl get pods -n test -o jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'" register: running_pods no_log: true From ca6a07f5954d77afdbdf6c2ff2b0db01a1e4c210 Mon Sep 17 00:00:00 2001 From: RongZhang Date: Thu, 5 Apr 2018 22:36:50 +0800 Subject: [PATCH 17/82] Add VMware vSphere to deployed --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 56210a8f9..994469b22 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ Deploy a Production Ready Kubernetes Cluster If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**. -- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal** +-   Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal** - **High available** cluster - **Composable** (Choice of the network plugin for instance) - Support most popular **Linux distributions** From ca40d51bc618118c1ae16a75078649c063eee8fc Mon Sep 17 00:00:00 2001 From: Daniel Hoherd Date: Thu, 5 Apr 2018 15:54:58 -0700 Subject: [PATCH 18/82] Fix typos (no logic changes) --- inventory/sample/group_vars/k8s-cluster.yml | 6 +++--- roles/kubernetes/preinstall/tasks/dhclient-hooks.yml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index 031108767..345d22a36 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -1,8 +1,8 @@ # Kubernetes configuration dirs and system namespace. 
# Those are where all the additional config stuff goes -# the kubernetes normally puts in /srv/kubernets. +# the kubernetes normally puts in /srv/kubernetes. # This puts them in a sane location and namespace. -# Editting those values will almost surely break something. +# Editing those values will almost surely break something. kube_config_dir: /etc/kubernetes kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" kube_manifest_dir: "{{ kube_config_dir }}/manifests" @@ -28,7 +28,7 @@ local_release_dir: "/tmp/releases" retry_stagger: 5 # This is the group that the cert creation scripts chgrp the -# cert files to. Not really changable... +# cert files to. Not really changeable... kube_cert_group: kube-cert # Cluster Loglevel configuration diff --git a/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml b/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml index 8c0a5f599..0ab2c9b07 100644 --- a/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml +++ b/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml @@ -15,7 +15,7 @@ notify: Preinstall | restart network when: dhclientconffile is defined -- name: Configue dhclient hooks for resolv.conf (non-RH) +- name: Configure dhclient hooks for resolv.conf (non-RH) template: src: dhclient_dnsupdate.sh.j2 dest: "{{ dhclienthookfile }}" @@ -24,7 +24,7 @@ notify: Preinstall | restart network when: ansible_os_family != "RedHat" -- name: Configue dhclient hooks for resolv.conf (RH-only) +- name: Configure dhclient hooks for resolv.conf (RH-only) template: src: dhclient_dnsupdate_rh.sh.j2 dest: "{{ dhclienthookfile }}" From 9086665013a10051e7f79233937b3ba19dcc4878 Mon Sep 17 00:00:00 2001 From: rongzhang Date: Fri, 6 Apr 2018 17:28:33 +0800 Subject: [PATCH 19/82] Fix issues #2522 Support Debian stretch https://download.docker.com/linux/debian/dists/ --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 994469b22..3bd0ebfb9 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ 
Supported Linux Distributions ----------------------------- - **Container Linux by CoreOS** -- **Debian** Jessie +- **Debian** Jessie, Stretch, Wheezy - **Ubuntu** 16.04 - **CentOS/RHEL** 7 - **Fedora/CentOS** Atomic From dfc46f02d75a26c23641ec33e05db83b225a899c Mon Sep 17 00:00:00 2001 From: Brad Beam Date: Fri, 6 Apr 2018 15:29:52 -0500 Subject: [PATCH 20/82] Adding missing service-account certificate for vault Missed in #2554 --- roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml b/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml index d74704448..02c512a4e 100644 --- a/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml +++ b/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml @@ -32,7 +32,7 @@ sync_file_hosts: "{{ groups['kube-master'] }}" sync_file_is_cert: true sync_file_owner: kube - with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem"] + with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem", "service-account.pem"] - name: sync_kube_master_certs | Set facts for kube master components sync_file results set_fact: From 66b61866cdc7fbe7bde19bf260a361414953f19d Mon Sep 17 00:00:00 2001 From: rongzhang Date: Fri, 6 Apr 2018 18:16:05 +0800 Subject: [PATCH 21/82] Fix check docker error for atomic Fix issues #2611 --- roles/docker/tasks/pre-upgrade.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/roles/docker/tasks/pre-upgrade.yml b/roles/docker/tasks/pre-upgrade.yml index 9315da305..8b75cba0d 100644 --- a/roles/docker/tasks/pre-upgrade.yml +++ b/roles/docker/tasks/pre-upgrade.yml @@ -6,7 +6,9 @@ with_items: - docker - docker-engine - when: ansible_os_family == 'Debian' and (docker_versioned_pkg[docker_version | string] | search('docker-ce')) + when: + - ansible_os_family == 'Debian' + - (docker_versioned_pkg[docker_version | 
string] | search('docker-ce')) - name: Ensure old versions of Docker are not installed. | RedHat package: @@ -17,4 +19,7 @@ - docker-common - docker-engine - docker-selinux - when: ansible_os_family == 'RedHat' and (docker_versioned_pkg[docker_version | string] | search('docker-ce')) \ No newline at end of file + when: + - ansible_os_family == 'RedHat' + - (docker_versioned_pkg[docker_version | string] | search('docker-ce')) + - not is_atomic From f954bc0a5a7b0229de7d1ae419f02e34c546cb13 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Mon, 9 Apr 2018 12:27:53 +0300 Subject: [PATCH 22/82] Remove jinja2 dependency of do While `do` looks cleaner, forcing this extra option in ansible.cfg seems to be more invasive. It would be better to keep the traditional approach of `set dummy = ` instead. --- ansible.cfg | 1 - .../kubernetes/node/templates/kubelet.standard.env.j2 | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/ansible.cfg b/ansible.cfg index 6f381690e..d3102a6f4 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -13,4 +13,3 @@ callback_whitelist = profile_tasks roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles deprecation_warnings=False inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds -jinja2_extensions = jinja2.ext.do diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2 index 5fef2476e..e9d7e960f 100644 --- a/roles/kubernetes/node/templates/kubelet.standard.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2 @@ -83,20 +83,20 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {# Kubelet node labels #} {% set role_node_labels = [] %} {% if inventory_hostname in groups['kube-master'] %} -{% do role_node_labels.append('node-role.kubernetes.io/master=true') %} +{% set dummy = 
role_node_labels.append('node-role.kubernetes.io/master=true') %} {% if not standalone_kubelet|bool %} -{% do role_node_labels.append('node-role.kubernetes.io/node=true') %} +{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %} {% endif %} {% else %} -{% do role_node_labels.append('node-role.kubernetes.io/node=true') %} +{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %} {% endif %} {% if inventory_hostname in groups['kube-ingress']|default([]) %} -{% do role_node_labels.append('node-role.kubernetes.io/ingress=true') %} +{% set dummy = role_node_labels.append('node-role.kubernetes.io/ingress=true') %} {% endif %} {% set inventory_node_labels = [] %} {% if node_labels is defined %} {% for labelname, labelvalue in node_labels.iteritems() %} -{% do inventory_node_labels.append(labelname + '=' + labelvalue) %} +{% set dummy = inventory_node_labels.append(labelname + '=' + labelvalue) %} {% endfor %} {% endif %} {% set all_node_labels = role_node_labels + inventory_node_labels %} From b68854f79d5b777ea7f8a0ff9625c4e0b28faa65 Mon Sep 17 00:00:00 2001 From: Atoms Date: Mon, 9 Apr 2018 13:19:26 +0300 Subject: [PATCH 23/82] fix kubectl download location and kubectl.sh helper owner/group remove --- roles/kubernetes/client/tasks/main.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml index cf70b4995..d34131a3a 100644 --- a/roles/kubernetes/client/tasks/main.yml +++ b/roles/kubernetes/client/tasks/main.yml @@ -55,7 +55,7 @@ - name: Copy kubectl binary to ansible host fetch: src: "{{ bin_dir }}/kubectl" - dest: "{{ bin_dir }}/kubectl" + dest: "{{ artifacts_dir }}/kubectl" flat: yes validate_checksum: no become: no @@ -68,8 +68,6 @@ #!/bin/bash kubectl --kubeconfig=admin.conf $@ dest: "{{ artifacts_dir }}/kubectl.sh" - owner: root - group: root mode: 0755 become: no run_once: yes From 4c12b273ac46e0ca0a1b40d93ae5437637b65c6c 
Mon Sep 17 00:00:00 2001 From: Marcelo Grebois Date: Mon, 9 Apr 2018 12:49:05 +0200 Subject: [PATCH 24/82] Enabling MutatingAdmissionWebhook for Istio Automatic sidecar injection https://istio.io/docs/setup/kubernetes/sidecar-injection.html#automatic-sidecar-injection --- roles/kubernetes/master/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 6325bb31c..7ae8b1823 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -52,7 +52,7 @@ kube_apiserver_admission_control: {%- if kube_version | version_compare('v1.9', '<') -%} GenericAdmissionWebhook {%- else -%} - ValidatingAdmissionWebhook + ValidatingAdmissionWebhook,MutatingAdmissionWebhook {%- endif -%} - ResourceQuota From 94eb18b3d9466b140fde75bd7810124e89dc98fd Mon Sep 17 00:00:00 2001 From: Vikas Kumar Date: Tue, 27 Feb 2018 01:12:03 +1100 Subject: [PATCH 25/82] Replaced ansible_ssh_host with ansible_host in sample inventory file as the former is deprecated since Ansible v2.0 Fixed the reference of ansible_user in kubespray-defaults role References: - http://docs.ansible.com/ansible/latest/intro_inventory.html --- inventory/sample/hosts.ini | 14 +++++++------- roles/kubespray-defaults/defaults/main.yaml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/inventory/sample/hosts.ini b/inventory/sample/hosts.ini index 245783334..bddfa2f80 100644 --- a/inventory/sample/hosts.ini +++ b/inventory/sample/hosts.ini @@ -1,14 +1,14 @@ # ## Configure 'ip' variable to bind kubernetes services on a # ## different ip than the default iface -# node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1 -# node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2 -# node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3 -# node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4 -# node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5 -# node6 ansible_ssh_host=95.54.0.17 # 
ip=10.3.0.6 +# node1 ansible_host=95.54.0.12 # ip=10.3.0.1 +# node2 ansible_host=95.54.0.13 # ip=10.3.0.2 +# node3 ansible_host=95.54.0.14 # ip=10.3.0.3 +# node4 ansible_host=95.54.0.15 # ip=10.3.0.4 +# node5 ansible_host=95.54.0.16 # ip=10.3.0.5 +# node6 ansible_host=95.54.0.17 # ip=10.3.0.6 # ## configure a bastion host if your nodes are not directly reachable -# bastion ansible_ssh_host=x.x.x.x +# bastion ansible_host=x.x.x.x ansible_user=some_user # [kube-master] # node1 diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index d6217d654..cb845a067 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -5,7 +5,7 @@ bootstrap_os: none # Use proxycommand if bastion host is in group all # This change obseletes editing ansible.cfg file depending on bastion existance -ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ ansible_user }}@{{hostvars['bastion']['ansible_host']}} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}" +ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ hostvars['bastion']['ansible_user'] }}@{{ hostvars['bastion']['ansible_host'] }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}" kube_api_anonymous_auth: false From 0c0f6b755db5b2ba35b91b75cfe837df5f0967bb Mon Sep 17 00:00:00 2001 From: Robin Skahjem-Eriksen Date: Tue, 10 Apr 2018 11:09:43 +0200 Subject: [PATCH 26/82] Fix new envvar for setting openstack_tenant_id Changed from OS_PROJECT_ID to OS_PROJECT_NAME. 
--- roles/kubernetes/node/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index 52ca8d59d..bac6ae1db 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -101,7 +101,7 @@ openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" openstack_username: "{{ lookup('env','OS_USERNAME') }}" openstack_password: "{{ lookup('env','OS_PASSWORD') }}" openstack_region: "{{ lookup('env','OS_REGION_NAME') }}" -openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true) }}" +openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|lookup('env','OS_PROJECT_ID')|default(lookup('env','OS_PROJECT_NAME'),true) }}" openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" From 45f15bf753745517d092c52bc3971dfa4e4ff132 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Tue, 10 Apr 2018 14:37:24 +0300 Subject: [PATCH 27/82] Revert "Fix new envvar for setting openstack_tenant_id" (#2640) --- roles/kubernetes/node/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index bac6ae1db..52ca8d59d 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -101,7 +101,7 @@ openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" openstack_username: "{{ lookup('env','OS_USERNAME') }}" openstack_password: "{{ lookup('env','OS_PASSWORD') }}" openstack_region: "{{ lookup('env','OS_REGION_NAME') }}" -openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|lookup('env','OS_PROJECT_ID')|default(lookup('env','OS_PROJECT_NAME'),true) }}" +openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true) }}" openstack_domain_name: "{{ 
lookup('env','OS_USER_DOMAIN_NAME') }}" openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" From 09f93d9e0cee214a0227aa0aaea714f7f59e28f0 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Tue, 10 Apr 2018 16:02:33 +0300 Subject: [PATCH 28/82] Fix CI upgrade scenario by using dynamic inventory file (#2635) Also updates the commit ID we use as a basis for upgrade tests. --- .gitlab-ci.yml | 25 +++++++++++++------------ tests/Makefile | 2 +- tests/support/aws.groovy | 4 ++-- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5af631476..6a1eef6ab 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -20,6 +20,7 @@ variables: GCE_PREEMPTIBLE: "false" ANSIBLE_KEEP_REMOTE_FILES: "1" ANSIBLE_CONFIG: ./tests/ansible.cfg + ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini IDEMPOT_CHECK: "false" RESET_CHECK: "false" UPGRADE_TEST: "false" @@ -90,9 +91,9 @@ before_script: - cd tests && make create-${CI_PLATFORM} -s ; cd - # Check out latest tag if testing upgrade - # Uncomment when gitlab kargo repo has tags + # Uncomment when gitlab kubespray repo has tags #- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1)) - - test "${UPGRADE_TEST}" != "false" && git checkout ba0a03a8ba2d97a73d06242ec4bb3c7e2012e58c + - test "${UPGRADE_TEST}" != "false" && git checkout f7d52564aad2ff8e337634951beb4a881c0e8aa6 # Checkout the CI vars file so it is available - test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml # Workaround https://github.com/kubernetes-incubator/kubespray/issues/2021 @@ -102,7 +103,7 @@ before_script: # Create cluster - > ansible-playbook - -i inventory/sample/hosts.ini + -i ${ANSIBLE_INVENTORY} -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER @@ -121,7 +122,7 @@ before_script: test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml"; 
git checkout "${CI_BUILD_REF}"; ansible-playbook - -i inventory/sample/hosts.ini + -i ${ANSIBLE_INVENTORY} -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER @@ -137,20 +138,20 @@ before_script: # Tests Cases ## Test Master API - > - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL + ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}" ## Ping the between 2 pod - - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL + - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL ## Advanced DNS checks - - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL + - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL ## Idempotency checks 1/5 (repeat deployment) - > if [ "${IDEMPOT_CHECK}" = "true" ]; then ansible-playbook - -i inventory/sample/hosts.ini + -i ${ANSIBLE_INVENTORY} -b --become-user=root --private-key=${HOME}/.ssh/id_rsa 
-u $SSH_USER @@ -167,7 +168,7 @@ before_script: - > if [ "${IDEMPOT_CHECK}" = "true" ]; then ansible-playbook - -i inventory/sample/hosts.ini + -i ${ANSIBLE_INVENTORY} -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER @@ -182,7 +183,7 @@ before_script: - > if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then ansible-playbook - -i inventory/sample/hosts.ini + -i ${ANSIBLE_INVENTORY} -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER @@ -199,7 +200,7 @@ before_script: - > if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then ansible-playbook - -i inventory/sample/hosts.ini + -i ${ANSIBLE_INVENTORY} -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER @@ -215,7 +216,7 @@ before_script: ## Idempotency checks 5/5 (Advanced DNS checks) - > if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} + ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL; diff --git a/tests/Makefile b/tests/Makefile index 8d17e243c..30442fb25 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -1,4 +1,4 @@ -INVENTORY=$(PWD)/../inventory/sample/hosts.ini +INVENTORY=$(PWD)/../inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini $(HOME)/.ssh/id_rsa: mkdir -p $(HOME)/.ssh diff --git a/tests/support/aws.groovy b/tests/support/aws.groovy index a5ce89b8f..bc13b513a 100644 --- a/tests/support/aws.groovy +++ b/tests/support/aws.groovy @@ -1,9 +1,9 @@ def run(username, credentialsId, ami, network_plugin, aws_access, aws_secret) { - def inventory_path = pwd() + "/inventory/sample/hosts.ini" + def inventory_path = pwd() + "/inventory/sample/${env.CI_JOB_NAME}-${env.BUILD_NUMBER}.ini" dir('tests') { wrap([$class: 'AnsiColorBuildWrapper', 
colorMapName: "xterm"]) { try { - create_vm("${env.JOB_NAME}-${env.BUILD_NUMBER}", inventory_path, ami, username, network_plugin, aws_access, aws_secret) + create_vm("${env.CI_JOB_NAME}-${env.BUILD_NUMBER}", inventory_path, ami, username, network_plugin, aws_access, aws_secret) install_cluster(inventory_path, credentialsId, network_plugin) test_apiserver(inventory_path, credentialsId) From 77b3f9bb97017c2ba8e18a423af273fb74424869 Mon Sep 17 00:00:00 2001 From: Brad Beam Date: Tue, 10 Apr 2018 09:19:25 -0500 Subject: [PATCH 29/82] Removing default for volume-plugins mountpoint (#2618) All checks test if this is defined meaning there is no way to undefine it. --- roles/kubernetes/node/templates/kubelet-container.j2 | 5 +++++ .../node/templates/kubelet.docker.service.j2 | 2 -- .../kubernetes/node/templates/kubelet.host.service.j2 | 2 -- .../kubernetes/node/templates/kubelet.rkt.service.j2 | 11 ++++++----- .../kubernetes/node/templates/kubelet.standard.env.j2 | 2 -- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/roles/kubernetes/node/templates/kubelet-container.j2 b/roles/kubernetes/node/templates/kubelet-container.j2 index 22671b2c3..75e07ca27 100644 --- a/roles/kubernetes/node/templates/kubelet-container.j2 +++ b/roles/kubernetes/node/templates/kubelet-container.j2 @@ -24,6 +24,11 @@ -v /var/lib/kubelet:/var/lib/kubelet:shared \ -v /var/lib/cni:/var/lib/cni:shared \ -v /var/run:/var/run:rw \ + {# we can run into issues with double mounting /var/lib/kubelet #} + {# surely there's a better way to do this #} + {% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %} + -v {{ kubelet_flexvolumes_plugins_dir }}:{{ kubelet_flexvolumes_plugins_dir }}:rw \ + {% endif -%} -v {{kube_config_dir}}:{{kube_config_dir}}:ro \ -v /etc/os-release:/etc/os-release:ro \ {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \ diff --git a/roles/kubernetes/node/templates/kubelet.docker.service.j2 b/roles/kubernetes/node/templates/kubelet.docker.service.j2 index 
bba1a5fc4..c20cf797f 100644 --- a/roles/kubernetes/node/templates/kubelet.docker.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.docker.service.j2 @@ -23,9 +23,7 @@ ExecStart={{ bin_dir }}/kubelet \ Restart=always RestartSec=10s ExecStartPre=-{{ docker_bin_dir }}/docker rm -f kubelet -{% if kubelet_flexvolumes_plugins_dir is defined %} ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }} -{% endif %} ExecReload={{ docker_bin_dir }}/docker restart kubelet diff --git a/roles/kubernetes/node/templates/kubelet.host.service.j2 b/roles/kubernetes/node/templates/kubelet.host.service.j2 index c7dad4e29..3584cfcf5 100644 --- a/roles/kubernetes/node/templates/kubelet.host.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.host.service.j2 @@ -7,9 +7,7 @@ Wants=docker.socket [Service] User=root EnvironmentFile=-{{kube_config_dir}}/kubelet.env -{% if kubelet_flexvolumes_plugins_dir is defined %} ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }} -{% endif %} ExecStart={{ bin_dir }}/kubelet \ $KUBE_LOGTOSTDERR \ $KUBE_LOG_LEVEL \ diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 index 4286d9470..283ce1ad9 100644 --- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 @@ -12,10 +12,7 @@ LimitNOFILE=40000 ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet.uuid ExecStartPre=-/bin/mkdir -p /var/lib/kubelet - -{% if kubelet_flexvolumes_plugins_dir is defined %} ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }} -{% endif %} EnvironmentFile={{kube_config_dir}}/kubelet.env # stage1-fly mounts /proc /sys /dev so no need to duplicate the mounts @@ -41,7 +38,9 @@ ExecStart=/usr/bin/rkt run \ --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \ --volume opt-cni,kind=host,source=/opt/cni,readOnly=true \ --volume var-lib-cni,kind=host,source=/var/lib/cni,readOnly=false \ -{% if 
kubelet_flexvolumes_plugins_dir is defined %} +{# we can run into issues with double mounting /var/lib/kubelet #} +{# surely there's a better way to do this #} +{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %} --volume flexvolumes,kind=host,source={{ kubelet_flexvolumes_plugins_dir }},readOnly=false \ {% endif %} {% if kubelet_load_modules == true %} @@ -65,7 +64,9 @@ ExecStart=/usr/bin/rkt run \ --mount volume=var-lib-kubelet,target=/var/lib/kubelet \ --mount volume=var-log,target=/var/log \ --mount volume=hosts,target=/etc/hosts \ -{% if kubelet_flexvolumes_plugins_dir is defined %} +{# we can run into issues with double mounting /var/lib/kubelet #} +{# surely there's a better way to do this #} +{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %} --mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \ {% endif %} --stage1-from-dir=stage1-fly.aci \ diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2 index e9d7e960f..31a72f518 100644 --- a/roles/kubernetes/node/templates/kubelet.standard.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2 @@ -110,9 +110,7 @@ DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock" KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet" {% endif %} -{% if kubelet_flexvolumes_plugins_dir is defined %} KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}" -{% endif %} # Should this cluster be allowed to run privileged docker containers KUBE_ALLOW_PRIV="--allow-privileged=true" From 0f35e17e230d8a0a57bae0ddd786a93a4a700e69 Mon Sep 17 00:00:00 2001 From: Robin Skahjem-Eriksen Date: Tue, 10 Apr 2018 16:23:31 +0200 Subject: [PATCH 30/82] Fix new envvar for setting openstack_tenant_id (#2641) Changed from OS_PROJECT_ID to OS_PROJECT_NAME. 
--- roles/kubernetes/node/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index 52ca8d59d..e1a1f1777 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -101,7 +101,7 @@ openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" openstack_username: "{{ lookup('env','OS_USERNAME') }}" openstack_password: "{{ lookup('env','OS_PASSWORD') }}" openstack_region: "{{ lookup('env','OS_REGION_NAME') }}" -openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true) }}" +openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID')|default(lookup('env','OS_PROJECT_NAME'),true)) }}" openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" From 88765f62e642e0552c77b5f2cb07eb58dae89bd0 Mon Sep 17 00:00:00 2001 From: Marcelo Grebois Date: Tue, 10 Apr 2018 17:17:39 +0200 Subject: [PATCH 31/82] Updating order https://kubernetes.io/docs/admin/admission-controllers/#is-there-a-recommended-set-of-admission-controllers-to-use --- roles/kubernetes/master/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 7ae8b1823..c2715df85 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -52,7 +52,7 @@ kube_apiserver_admission_control: {%- if kube_version | version_compare('v1.9', '<') -%} GenericAdmissionWebhook {%- else -%} - ValidatingAdmissionWebhook,MutatingAdmissionWebhook + MutatingAdmissionWebhook,ValidatingAdmissionWebhook {%- endif -%} - ResourceQuota From 5db1c3eef7752f45c18eb03bb27f2fb92b75b2de Mon Sep 17 00:00:00 2001 From: Karol Chrapek Date: Mon, 9 Apr 2018 00:45:55 +0200 Subject: [PATCH 32/82] Add note 
about privilege escalation method to the README --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 3bd0ebfb9..9ceb241c0 100644 --- a/README.md +++ b/README.md @@ -105,6 +105,9 @@ Requirements - **Your ssh key must be copied** to all the servers part of your inventory. - The **firewalls are not managed**, you'll need to implement your own rules the way you used to. in order to avoid any issue during deployment you should disable your firewall. +- If kubespray is ran from non-root user account, correct privilege escalation method + should be configured in the target servers. Then the `ansible_become` flag + or command parameters `--become or -b` should be specified. Network Plugins --------------- From 6c954df636e84aff57a13f531ff479e44fdbc5d5 Mon Sep 17 00:00:00 2001 From: Atoms Date: Wed, 11 Apr 2018 12:05:33 +0300 Subject: [PATCH 33/82] move when condition to main.yml --- roles/download/tasks/main.yml | 1 + roles/download/tasks/sync_container.yml | 12 ++---------- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index c6e910e5d..2474b4029 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -22,3 +22,4 @@ - item.value.enabled - item.value.container - download_run_once + - group_names | intersect(download.groups) | length diff --git a/roles/download/tasks/sync_container.yml b/roles/download/tasks/sync_container.yml index 1ca84ad67..c7e37d7f3 100644 --- a/roles/download/tasks/sync_container.yml +++ b/roles/download/tasks/sync_container.yml @@ -7,7 +7,6 @@ when: - download.enabled - download.container - - group_names | intersect(download.groups) | length tags: - facts @@ -18,7 +17,7 @@ - download.enabled - download.container - download_run_once - - group_names | intersect(download.groups) | length + tags: - facts @@ -29,7 +28,6 @@ - download.enabled - download.container - download_run_once - - group_names | 
intersect(download.groups) | length - name: "container_download | Update the 'container_changed' fact" set_fact: @@ -39,14 +37,13 @@ - download.container - download_run_once - pull_required|default(download_always_pull) - - group_names | intersect(download.groups) | length run_once: "{{ download_run_once }}" tags: - facts - name: container_download | Stat saved container image stat: - path: "{{fname}}" + path: "{{ fname }}" register: img changed_when: false delegate_to: "{{ download_delegate }}" @@ -57,7 +54,6 @@ - download.enabled - download.container - download_run_once - - group_names | intersect(download.groups) | length tags: - facts @@ -73,7 +69,6 @@ - download_run_once - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost") - (container_changed or not img.stat.exists) - - group_names | intersect(download.groups) | length - name: container_download | copy container images to ansible host synchronize: @@ -93,7 +88,6 @@ - inventory_hostname == download_delegate - download_delegate != "localhost" - saved.changed - - group_names | intersect(download.groups) | length - name: container_download | upload container images to nodes synchronize: @@ -115,7 +109,6 @@ - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname != download_delegate or download_delegate == "localhost") - - group_names | intersect(download.groups) | length tags: - upload - upgrade @@ -128,7 +121,6 @@ - download_run_once - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname != download_delegate or download_delegate == "localhost") - - group_names | intersect(download.groups) | length tags: - upload - upgrade From ff003cfa3cbef1f5cfe017fc54e97e2ccf86e296 Mon Sep 17 00:00:00 2001 From: rongzhang Date: Wed, 11 Apr 2018 17:30:01 +0800 Subject: [PATCH 34/82] Fix missing install remove-node feature --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg 
index 2327160ad..ada55fcd9 100644 --- a/setup.cfg +++ b/setup.cfg @@ -26,6 +26,7 @@ data_files = upgrade-cluster.yml scale.yml reset.yml + remove-node.yml extra_playbooks/upgrade-only-k8s.yml /usr/share/kubespray/roles = roles/* /usr/share/doc/kubespray/ = From 0ed1919a38414bfb8f52d0b4955114a00089e798 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Tue, 6 Feb 2018 12:45:02 +0000 Subject: [PATCH 35/82] Vagrantfile: Add support for openSUSE Leap 42.3 Add support for provisioning kubespray on openSUSE Leap 42.3 virtual machines. --- Vagrantfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Vagrantfile b/Vagrantfile index 536bbff2b..b9464f806 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -18,6 +18,7 @@ SUPPORTED_OS = { "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]}, "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"}, "centos" => {box: "centos/7", bootstrap_os: "centos", user: "vagrant"}, + "opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", bootstrap_os: "opensuse", use: "vagrant"}, } # Defaults for config options defined in CONFIG From 112ccfa9dbae176f2f0ddda72dd4c9f6b2740bb5 Mon Sep 17 00:00:00 2001 From: Nirmoy Das Date: Thu, 22 Feb 2018 15:17:57 +0100 Subject: [PATCH 36/82] Vagrantfile: Add support for openSUSE Tumbleweed Add support for provisioning kubespray on openSUSE Tumbleweed virtual machines. 
--- Vagrantfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Vagrantfile b/Vagrantfile index b9464f806..0b86f7eda 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -19,6 +19,7 @@ SUPPORTED_OS = { "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"}, "centos" => {box: "centos/7", bootstrap_os: "centos", user: "vagrant"}, "opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", bootstrap_os: "opensuse", use: "vagrant"}, + "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", bootstrap_os: "opensuse", use: "vagrant"}, } # Defaults for config options defined in CONFIG From e113d1ccabb50c89034d52868bd36015b6006f4b Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 11 Apr 2018 16:59:11 +0100 Subject: [PATCH 37/82] Vagrantfile: Use rsync to copy working directory to VM Depending on the VM configuration, vagrant may either use 'rsync' or vboxfs for populating the working directory to the VM. However, vboxfs means that any files created by the VM will also be present on the host. As such, lets be explicit and always use 'rsync' to copy the directory to the VM so we can keep the host copy clean. Moreover, the default rsync options include '--copy-links' and this breaks rsync if there are missing symlinks in the working directory like the following one: Error: symlink has no referent: "/home/user/kubespray/contrib/network-storage/glusterfs/group_vars" As such, we override the default options to drop --copy-links. 
--- Vagrantfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 0b86f7eda..cf174ef77 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -86,7 +86,6 @@ Vagrant.configure("2") do |config| if Vagrant.has_plugin?("vagrant-vbguest") then config.vbguest.auto_update = false end - (1..$num_instances).each do |i| config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config| config.vm.hostname = vm_name @@ -112,8 +111,10 @@ Vagrant.configure("2") do |config| end end + config.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] + $shared_folders.each do |src, dst| - config.vm.synced_folder src, dst + config.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] end config.vm.provider :virtualbox do |vb| From dca47773471d69ad478e0d012cf1829c21f944ef Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 19 Feb 2018 10:03:12 +0000 Subject: [PATCH 38/82] roles: bootstrap-os: Add support for SUSE distributions Install some required packages when running on SUSE distributions. 
--- roles/bootstrap-os/tasks/bootstrap-opensuse.yml | 7 +++++++ roles/bootstrap-os/tasks/main.yml | 3 +++ 2 files changed, 10 insertions(+) create mode 100644 roles/bootstrap-os/tasks/bootstrap-opensuse.yml diff --git a/roles/bootstrap-os/tasks/bootstrap-opensuse.yml b/roles/bootstrap-os/tasks/bootstrap-opensuse.yml new file mode 100644 index 000000000..abedd2195 --- /dev/null +++ b/roles/bootstrap-os/tasks/bootstrap-opensuse.yml @@ -0,0 +1,7 @@ +--- +- name: Install required packages (SUSE) + package: + name: "{{ item }}" + state: present + with_items: + - python-cryptography diff --git a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml index 01031deeb..139e22b5e 100644 --- a/roles/bootstrap-os/tasks/main.yml +++ b/roles/bootstrap-os/tasks/main.yml @@ -11,6 +11,9 @@ - import_tasks: bootstrap-centos.yml when: bootstrap_os == "centos" +- import_tasks: bootstrap-opensuse.yml + when: bootstrap_os == "opensuse" + - import_tasks: setup-pipelining.yml - name: check if atomic host From 4ba25326ed23d646646c151f6cf853bf9f79cac5 Mon Sep 17 00:00:00 2001 From: Nirmoy Das Date: Thu, 22 Feb 2018 15:54:40 +0000 Subject: [PATCH 39/82] roles: bootstrap-os: Use 'hostname' command on Tumbleweed openSUSE Tumbleweed is having the same problems with CoreOS when it comes to using the hostname ansible module (#1588, #1600) so we need to apply a similar workaround. 
Co-authored-by: Markos Chandras Link: http://bugzilla.opensuse.org/show_bug.cgi?id=997614 --- roles/bootstrap-os/tasks/main.yml | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml index 139e22b5e..c921b643e 100644 --- a/roles/bootstrap-os/tasks/main.yml +++ b/roles/bootstrap-os/tasks/main.yml @@ -29,18 +29,25 @@ gather_subset: '!all' filter: ansible_* -- name: Assign inventory name to unconfigured hostnames (non-CoreOS) +- name: Assign inventory name to unconfigured hostnames (non-CoreOS and Tumbleweed) hostname: name: "{{inventory_hostname}}" - when: ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS'] and override_system_hostname + when: + - override_system_hostname + - ansible_distribution not in ['openSUSE Tumbleweed'] + - ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS'] -- name: Assign inventory name to unconfigured hostnames (CoreOS only) +- name: Assign inventory name to unconfigured hostnames (CoreOS and Tumbleweed only) command: "hostnamectl set-hostname {{inventory_hostname}}" register: hostname_changed - when: ansible_hostname == 'localhost' and ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and override_system_hostname + when: + - ansible_hostname == 'localhost' + - ansible_distribution in ['openSUSE Tumbleweed'] or ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] + - override_system_hostname -- name: Update hostname fact (CoreOS only) +- name: Update hostname fact (CoreOS and Tumbleweed only) setup: gather_subset: '!all' filter: ansible_hostname - when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and hostname_changed.changed + when: + - hostname_changed.changed From e42203a13efebfdf1a41e756b8b9fe04eda75c5c Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Tue, 6 Feb 2018 12:58:17 +0000 Subject: [PATCH 40/82] roles: kubernetes: preinstall: Add SUSE support Add support for 
installing package dependencies and refreshing metadata on SUSE distributions Co-authored-by: Nirmoy Das --- roles/kubernetes/preinstall/tasks/main.yml | 9 +++++++++ roles/kubernetes/preinstall/tasks/verify-settings.yml | 4 ++-- roles/kubernetes/preinstall/vars/suse.yml | 4 ++++ 3 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 roles/kubernetes/preinstall/vars/suse.yml diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index 652e35682..cd5dd7acd 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -143,6 +143,15 @@ - not is_atomic tags: bootstrap-os +- name: Update package management cache (zypper) - SUSE + shell: zypper -n --gpg-auto-import-keys ref + register: make_cache_output + until: make_cache_output|succeeded + retries: 4 + delay: "{{ retry_stagger | random + 3 }}" + when: + - ansible_pkg_mgr == 'zypper' + tags: bootstrap-os - name: Update package management cache (APT) apt: diff --git a/roles/kubernetes/preinstall/tasks/verify-settings.yml b/roles/kubernetes/preinstall/tasks/verify-settings.yml index 8f0a2e854..5f647101d 100644 --- a/roles/kubernetes/preinstall/tasks/verify-settings.yml +++ b/roles/kubernetes/preinstall/tasks/verify-settings.yml @@ -12,7 +12,7 @@ - name: Stop if unknown OS assert: - that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'CoreOS', 'Container Linux by CoreOS'] + that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'CoreOS', 'Container Linux by CoreOS', 'openSUSE Leap', 'openSUSE Tumbleweed'] ignore_errors: "{{ ignore_assert_errors }}" - name: Stop if unknown network plugin @@ -94,4 +94,4 @@ assert: that: ansible_kernel.split('-')[0]|version_compare('4.8', '>=') when: kube_network_plugin == 'cilium' - ignore_errors: "{{ ignore_assert_errors }}" \ No newline at end of file + ignore_errors: "{{ ignore_assert_errors }}" diff --git 
a/roles/kubernetes/preinstall/vars/suse.yml b/roles/kubernetes/preinstall/vars/suse.yml new file mode 100644 index 000000000..3f4f9aee9 --- /dev/null +++ b/roles/kubernetes/preinstall/vars/suse.yml @@ -0,0 +1,4 @@ +--- +required_pkgs: + - device-mapper + - ebtables From 45eac53ec7d67c4d974eda2a1e10ccb808215819 Mon Sep 17 00:00:00 2001 From: Nirmoy Das Date: Thu, 22 Feb 2018 16:19:20 +0000 Subject: [PATCH 41/82] roles: kubernetes: preinstall: Install openssl-1.1.0 on Tumbleweed The openssl package on Tumbleweed is actually a virtual package covering openssl-1.0.0 and openssl-1.1.0 implementations. It defaults to 1.1.0 so when trying to install it and openssl-1.0.0 is installed, zypper fails with conflicts. As such, lets explicitly pull the package that we need which also updates the virtual one. Co-authored-by: Markos Chandras --- roles/kubernetes/preinstall/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml index 149cbb42a..3bf847fb9 100644 --- a/roles/kubernetes/preinstall/defaults/main.yml +++ b/roles/kubernetes/preinstall/defaults/main.yml @@ -8,7 +8,7 @@ epel_enabled: false common_required_pkgs: - python-httplib2 - - openssl + - "{{ (ansible_distribution == 'openSUSE Tumbleweed') | ternary('openssl-1_1_0', 'openssl') }}" - curl - rsync - bash-completion From 44a0626fc87e9c8d267ca17cdfd16c43ac423cf4 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Tue, 6 Feb 2018 13:35:40 +0000 Subject: [PATCH 42/82] roles: docker: Add support for SUSE distributions Add support for installing Docker on SUSE distributions. The Docker repository at https://yum.dockerproject.org/repo/main/ does not support recent openSUSE distributions so the only alternative is to use the packages from the distro repositories. This however renders the 'docker_version' Ansible variable useless on SUSE. 
--- roles/docker/tasks/main.yml | 16 ++++++++++++---- roles/docker/templates/docker.service.j2 | 6 ++++++ roles/docker/vars/suse.yml | 15 +++++++++++++++ 3 files changed, 33 insertions(+), 4 deletions(-) create mode 100644 roles/docker/vars/suse.yml diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 729397b44..2b5c24f5e 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -15,6 +15,14 @@ tags: - facts +# https://yum.dockerproject.org/repo/main/opensuse/ contains packages for an EOL +# openSUSE version so we can't use it. The only alternative is to use the docker +# packages from the distribution repositories. +- name: Warn about Docker version on SUSE + debug: + msg: "SUSE distributions always install Docker from the distro repos" + when: ansible_pkg_mgr == 'zypper' + - include_tasks: set_facts_dns.yml when: dns_mode != 'none' and resolvconf_mode == 'docker_dns' tags: @@ -43,7 +51,7 @@ retries: 4 delay: "{{ retry_stagger | random + 3 }}" with_items: "{{ docker_repo_key_info.repo_keys }}" - when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) - name: ensure docker-ce repository is enabled action: "{{ docker_repo_info.pkg_repo }}" @@ -51,7 +59,7 @@ repo: "{{item}}" state: present with_items: "{{ docker_repo_info.repos }}" - when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (docker_repo_info.repos|length > 0) + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (docker_repo_info.repos|length > 0) - name: ensure docker-engine repository public key is installed action: "{{ dockerproject_repo_key_info.pkg_key }}" @@ -64,7 +72,7 @@ retries: 4 delay: "{{ retry_stagger | random + 3 }}" with_items: "{{ dockerproject_repo_key_info.repo_keys }}" - when: not (ansible_os_family 
in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) - name: ensure docker-engine repository is enabled action: "{{ dockerproject_repo_info.pkg_repo }}" @@ -72,7 +80,7 @@ repo: "{{item}}" state: present with_items: "{{ dockerproject_repo_info.repos }}" - when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (dockerproject_repo_info.repos|length > 0) + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (dockerproject_repo_info.repos|length > 0) - name: Configure docker repository on RedHat/CentOS template: diff --git a/roles/docker/templates/docker.service.j2 b/roles/docker/templates/docker.service.j2 index d8efe2025..8dc82bbb2 100644 --- a/roles/docker/templates/docker.service.j2 +++ b/roles/docker/templates/docker.service.j2 @@ -7,6 +7,9 @@ Wants=docker-storage-setup.service {% elif ansible_os_family == "Debian" %} After=network.target docker.socket Wants=docker.socket +{% elif ansible_os_family == "Suse" %} +After=network.target containerd.socket containerd.service +Requires=containerd.socket containerd.service {% endif %} [Service] @@ -19,6 +22,9 @@ ExecReload=/bin/kill -s HUP $MAINPID Delegate=yes KillMode=process ExecStart={{ docker_bin_dir }}/docker{% if installed_docker_version.stdout|version_compare('17.03', '<') %} daemon{% else %}d{% endif %} \ +{% if ansible_os_family == "Suse" %} + --containerd /run/containerd/containerd.sock --add-runtime oci=/usr/bin/docker-runc \ +{% endif %} $DOCKER_OPTS \ $DOCKER_STORAGE_OPTIONS \ $DOCKER_NETWORK_OPTIONS \ diff --git a/roles/docker/vars/suse.yml b/roles/docker/vars/suse.yml new file mode 100644 index 000000000..d89a50a7f --- /dev/null +++ b/roles/docker/vars/suse.yml @@ -0,0 +1,15 @@ +--- +docker_kernel_min_version: '0' + +docker_package_info: + pkg_mgr: zypper + pkgs: + - name: docker + 
+docker_repo_key_info: + pkg_key: '' + repo_keys: [] + +docker_repo_info: + pkg_repo: '' + repos: [] From cdb63a8c498db5b6fcdd8494ad4c0000f83ab441 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 11 Apr 2018 13:27:59 +0100 Subject: [PATCH 43/82] roles: docker: Ensure service is started if docker is already installed If the 'docker' package is already installed, then the handlers will not run and the service will not be (re-)started. As such, lets make sure that the service is started even if the packages are already installed. --- roles/docker/tasks/main.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 2b5c24f5e..3668f61b8 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -118,6 +118,12 @@ notify: restart docker when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_package_info.pkgs|length > 0) +- name: ensure service is started if docker packages are already present + service: + name: docker + state: started + when: docker_task_result is not changed + - name: flush handlers so we can wait for docker to come up meta: flush_handlers From 2d3478125966018bafbe86550893da98eb9f4ac2 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 7 Feb 2018 07:56:13 +0000 Subject: [PATCH 44/82] roles: etcd: Add support for SUSE distributions Add path for certificate location for SUSE distributions. Also make sure the 'update-ca-certificates' command is executed on SUSE hosts as well. 
--- roles/etcd/tasks/upd_ca_trust.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/roles/etcd/tasks/upd_ca_trust.yml b/roles/etcd/tasks/upd_ca_trust.yml index dd36554fb..0ff363860 100644 --- a/roles/etcd/tasks/upd_ca_trust.yml +++ b/roles/etcd/tasks/upd_ca_trust.yml @@ -8,6 +8,8 @@ /etc/pki/ca-trust/source/anchors/etcd-ca.crt {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} /etc/ssl/certs/etcd-ca.pem + {%- elif ansible_os_family == "Suse" -%} + /etc/pki/trust/anchors/etcd-ca.pem {%- endif %} tags: - facts @@ -19,9 +21,9 @@ remote_src: true register: etcd_ca_cert -- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS) +- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS) command: update-ca-certificates - when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"] + when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"] - name: Gen_certs | update ca-certificates (RedHat) command: update-ca-trust extract From d07f75b38967c42d2bafe4b9976daa5283e8bb3e Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 7 Feb 2018 08:18:31 +0000 Subject: [PATCH 45/82] roles: kubernetes: secrets: Add SUSE support Add path for certificate location for SUSE distributions. Also make sure the 'update-ca-certificates' command is executed on SUSE hosts as well. 
--- roles/kubernetes/secrets/tasks/upd_ca_trust.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/roles/kubernetes/secrets/tasks/upd_ca_trust.yml b/roles/kubernetes/secrets/tasks/upd_ca_trust.yml index eec44987f..cdd5f48fa 100644 --- a/roles/kubernetes/secrets/tasks/upd_ca_trust.yml +++ b/roles/kubernetes/secrets/tasks/upd_ca_trust.yml @@ -8,6 +8,8 @@ /etc/pki/ca-trust/source/anchors/kube-ca.crt {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} /etc/ssl/certs/kube-ca.pem + {%- elif ansible_os_family == "Suse" -%} + /etc/pki/trust/anchors/kube-ca.pem {%- endif %} tags: - facts @@ -19,9 +21,9 @@ remote_src: true register: kube_ca_cert -- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS) +- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS) command: update-ca-certificates - when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"] + when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"] - name: Gen_certs | update ca-certificates (RedHat) command: update-ca-trust extract From 02bf742e15bcd821be69ccbab17b2d267b7a9024 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 12 Feb 2018 12:11:57 +0000 Subject: [PATCH 46/82] roles: rkt: Add support for SUSE distributions The RPM file that's provided by upstream can be used for SUSE distributions as well. Moreover we simplify the playbook to use the 'package' module to install packages across different distros. 
Link: https://github.com/rkt/rkt/pull/3904 --- roles/rkt/tasks/install.yml | 17 +++-------------- roles/rkt/vars/suse.yml | 2 ++ 2 files changed, 5 insertions(+), 14 deletions(-) create mode 100644 roles/rkt/vars/suse.yml diff --git a/roles/rkt/tasks/install.yml b/roles/rkt/tasks/install.yml index 599f9e50e..cbaaf4085 100644 --- a/roles/rkt/tasks/install.yml +++ b/roles/rkt/tasks/install.yml @@ -15,22 +15,11 @@ tags: - facts -- name: install rkt pkg on ubuntu - apt: - deb: "{{ rkt_download_url }}/{{ rkt_pkg_name }}" +- name: install rkt pkg + package: + name: "{{ rkt_download_url }}/{{ rkt_pkg_name }}" state: present register: rkt_task_result until: rkt_task_result|succeeded retries: 4 delay: "{{ retry_stagger | random + 3 }}" - when: ansible_os_family == "Debian" - -- name: install rkt pkg on centos - yum: - pkg: "{{ rkt_download_url }}/{{ rkt_pkg_name }}" - state: present - register: rkt_task_result - until: rkt_task_result|succeeded - retries: 4 - delay: "{{ retry_stagger | random + 3 }}" - when: ansible_os_family == "RedHat" diff --git a/roles/rkt/vars/suse.yml b/roles/rkt/vars/suse.yml new file mode 100644 index 000000000..13149e8fb --- /dev/null +++ b/roles/rkt/vars/suse.yml @@ -0,0 +1,2 @@ +--- +rkt_pkg_name: "rkt-{{ rkt_pkg_version }}.x86_64.rpm" From d75b5d6931820ff87f3460f57be57a5f63c2c922 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 7 Feb 2018 11:21:56 +0000 Subject: [PATCH 47/82] README.md: Add openSUSE Leap and Tumbleweed as supported distributions --- README.md | 2 ++ docs/opensuse.md | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 docs/opensuse.md diff --git a/README.md b/README.md index 9ceb241c0..094c36bb6 100644 --- a/README.md +++ b/README.md @@ -52,6 +52,7 @@ Documents - [Vagrant install](docs/vagrant.md) - [CoreOS bootstrap](docs/coreos.md) - [Debian Jessie setup](docs/debian.md) +- [openSUSE setup](docs/opensuse.md) - [Downloaded artifacts](docs/downloads.md) - [Cloud providers](docs/cloud.md) - 
[OpenStack](docs/openstack.md) @@ -70,6 +71,7 @@ Supported Linux Distributions - **Ubuntu** 16.04 - **CentOS/RHEL** 7 - **Fedora/CentOS** Atomic +- **openSUSE** Leap 42.3/Tumbleweed Note: Upstart/SysV init based OS types are not supported. diff --git a/docs/opensuse.md b/docs/opensuse.md new file mode 100644 index 000000000..88fac3790 --- /dev/null +++ b/docs/opensuse.md @@ -0,0 +1,19 @@ +openSUSE Leap 42.3 and Tumbleweed +=============== + +openSUSE Leap installation Notes: + +- Install Ansible + + ``` + sudo zypper ref + sudo zypper -n install ansible + + ``` + +- Install Jinja2 and Python-Netaddr + + ```sudo zypper -n install python-Jinja2 python-netaddr``` + + +Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment) From bc3abad602cfa701b9f431ceec756f5ab28f71cb Mon Sep 17 00:00:00 2001 From: Michal Rostecki Date: Thu, 15 Mar 2018 14:11:42 +0100 Subject: [PATCH 48/82] tests: Add CI jobs for openSUSE --- .gitlab-ci.yml | 15 +++++++++++++++ tests/files/gce_opensuse-canal.yml | 12 ++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 tests/files/gce_opensuse-canal.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6a1eef6ab..e03e64017 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -308,6 +308,10 @@ before_script: # stage: deploy-special MOVED_TO_GROUP_VARS: "true" +.opensuse_canal_variables: &opensuse_canal_variables +# stage: deploy-part2 + MOVED_TO_GROUP_VARS: "true" + # Builds for PRs only (premoderated by unit-tests step) and triggers (auto) ### PR JOBS PART1 @@ -589,6 +593,17 @@ gce_centos7-calico-ha-triggers: when: on_success only: ['triggers'] +gce_opensuse-canal: + stage: deploy-part2 + <<: *job + <<: *gce + variables: + <<: *gce_variables + <<: *opensuse_canal_variables + when: manual + except: ['triggers'] + only: ['master', /^pr-.*$/] + # no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613 gce_coreos-alpha-weave-ha: stage: deploy-special diff --git 
a/tests/files/gce_opensuse-canal.yml b/tests/files/gce_opensuse-canal.yml new file mode 100644 index 000000000..9eae57e2e --- /dev/null +++ b/tests/files/gce_opensuse-canal.yml @@ -0,0 +1,12 @@ +# Instance settings +cloud_image_family: opensuse-leap +cloud_region: us-central1-c +mode: default + +# Deployment settings +bootstrap_os: opensuse +kube_network_plugin: canal +kubeadm_enabled: true +deploy_netchecker: true +kubedns_min_replicas: 1 +cloud_provider: gce From 3fa7468d54224d9a2baac6332a23cd9a793637c9 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Tue, 10 Apr 2018 19:55:28 +0300 Subject: [PATCH 49/82] Copy ca-key.pem to etcd and kube-masters accordingly --- roles/vault/tasks/bootstrap/main.yml | 1 + roles/vault/tasks/cluster/main.yml | 1 + roles/vault/tasks/shared/gen_ca.yml | 5 ++++- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/roles/vault/tasks/bootstrap/main.yml b/roles/vault/tasks/bootstrap/main.yml index fdecbdd2a..7ca82a9c4 100644 --- a/roles/vault/tasks/bootstrap/main.yml +++ b/roles/vault/tasks/bootstrap/main.yml @@ -57,6 +57,7 @@ gen_ca_mount_path: "{{ vault_pki_mounts.etcd.name }}" gen_ca_vault_headers: "{{ vault_headers }}" gen_ca_vault_options: "{{ vault_ca_options.etcd }}" + gen_ca_copy_group: "etcd" when: inventory_hostname in groups.etcd and vault_etcd_ca_cert_needed - import_tasks: gen_vault_certs.yml diff --git a/roles/vault/tasks/cluster/main.yml b/roles/vault/tasks/cluster/main.yml index d904c2398..65b9dae9b 100644 --- a/roles/vault/tasks/cluster/main.yml +++ b/roles/vault/tasks/cluster/main.yml @@ -32,6 +32,7 @@ gen_ca_mount_path: "{{ vault_pki_mounts.kube.name }}" gen_ca_vault_headers: "{{ vault_headers }}" gen_ca_vault_options: "{{ vault_ca_options.kube }}" + gen_ca_copy_group: "kube-master" when: inventory_hostname in groups.vault - include_tasks: ../shared/auth_backend.yml diff --git a/roles/vault/tasks/shared/gen_ca.yml b/roles/vault/tasks/shared/gen_ca.yml index 654cc3ff3..77f2f82b9 100644 --- 
a/roles/vault/tasks/shared/gen_ca.yml +++ b/roles/vault/tasks/shared/gen_ca.yml @@ -24,9 +24,12 @@ mode: 0644 when: vault_ca_gen.status == 200 -- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA key locally" + +- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA key to necessary hosts" copy: content: "{{ hostvars[groups.vault|first]['vault_ca_gen']['json']['data']['private_key'] }}" dest: "{{ gen_ca_cert_dir }}/ca-key.pem" mode: 0640 when: vault_ca_gen.status == 200 + delegate_to: "{{ item }}" + with_items: "{{ (groups[gen_ca_copy_group|default('vault')]) | union(groups['vault']) }}" From 61791bbb3d0622507ce96593d5f082d26f2c24e5 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Thu, 12 Apr 2018 14:29:34 +0300 Subject: [PATCH 50/82] Remove condition for docker pull when using download delegate --- roles/download/tasks/download_container.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml index a5659619c..7e3923606 100644 --- a/roles/download/tasks/download_container.yml +++ b/roles/download/tasks/download_container.yml @@ -2,12 +2,11 @@ - name: container_download | Make download decision if pull is required by tag or sha256 include_tasks: set_docker_image_facts.yml delegate_to: "{{ download_delegate if download_run_once or omit }}" - delegate_facts: no + delegate_facts: yes run_once: "{{ download_run_once }}" when: - download.enabled - download.container - - group_names | intersect(download.groups) | length tags: - facts @@ -24,7 +23,6 @@ - download.enabled - download.container - pull_required|default(download_always_pull) - - group_names | intersect(download.groups) | length delegate_to: "{{ download_delegate }}" delegate_facts: yes run_once: yes From d87b6fd9f334dd8ea10896c46ec5da9759a5421a Mon Sep 17 00:00:00 2001 From: Chad Swenson Date: Thu, 5 Apr 2018 14:32:12 -0500 Subject: [PATCH 51/82] Use dedicated front-proxy-ca for 
front-proxy-client --- .../master/tasks/kubeadm-migrate-certs.yml | 4 ++++ .../manifests/kube-apiserver.manifest.j2 | 2 +- roles/kubernetes/secrets/defaults/main.yml | 1 + roles/kubernetes/secrets/files/make-ssl.sh | 19 ++++++++++++++++++- .../kubernetes/secrets/tasks/check-certs.yml | 3 +++ .../secrets/tasks/gen_certs_script.yml | 4 ++++ .../secrets/tasks/gen_certs_vault.yml | 4 +++- .../secrets/tasks/sync_kube_master_certs.yml | 12 ++++++++++++ roles/vault/defaults/main.yml | 16 ++++++++++++++-- roles/vault/tasks/cluster/create_mounts.yml | 3 ++- roles/vault/tasks/cluster/main.yml | 9 +++++++++ roles/vault/tasks/shared/issue_cert.yml | 3 ++- 12 files changed, 73 insertions(+), 7 deletions(-) diff --git a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml index 58eaaa66f..83bfbb22a 100644 --- a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml +++ b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml @@ -9,6 +9,10 @@ - {src: apiserver-key.pem, dest: apiserver.key} - {src: ca.pem, dest: ca.crt} - {src: ca-key.pem, dest: ca.key} + - {src: front-proxy-ca.pem, dest: front-proxy-ca.crt} + - {src: front-proxy-ca-key.pem, dest: front-proxy-ca.key} + - {src: front-proxy-client.pem, dest: front-proxy-client.crt} + - {src: front-proxy-client-key.pem, dest: front-proxy-client.key} - {src: service-account-key.pem, dest: sa.pub} - {src: service-account-key.pem, dest: sa.key} register: kubeadm_copy_old_certs diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 687ca415d..e0054686a 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -111,7 +111,7 @@ spec: - --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kube_version | version_compare('v1.9', '>=') %} - - 
--requestheader-client-ca-file={{ kube_cert_dir }}/ca.pem + - --requestheader-client-ca-file={{ kube_cert_dir }}/front-proxy-ca.pem - --requestheader-allowed-names=front-proxy-client - --requestheader-extra-headers-prefix=X-Remote-Extra- - --requestheader-group-headers=X-Remote-Group diff --git a/roles/kubernetes/secrets/defaults/main.yml b/roles/kubernetes/secrets/defaults/main.yml index f0d10711d..cda85eeb2 100644 --- a/roles/kubernetes/secrets/defaults/main.yml +++ b/roles/kubernetes/secrets/defaults/main.yml @@ -1,3 +1,4 @@ --- kube_cert_group: kube-cert kube_vault_mount_path: kube +front_proxy_vault_mount_path: front-proxy diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh index 1c34fc69d..2a4b930ea 100755 --- a/roles/kubernetes/secrets/files/make-ssl.sh +++ b/roles/kubernetes/secrets/files/make-ssl.sh @@ -72,6 +72,15 @@ else openssl req -x509 -new -nodes -key ca-key.pem -days 36500 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1 fi +# Front proxy client CA +if [ -e "$SSLDIR/front-proxy-ca-key.pem" ]; then + # Reuse existing front proxy CA + cp $SSLDIR/{front-proxy-ca.pem,front-proxy-ca-key.pem} . 
+else + openssl genrsa -out front-proxy-ca-key.pem 2048 > /dev/null 2>&1 + openssl req -x509 -new -nodes -key front-proxy-ca-key.pem -days 36500 -out front-proxy-ca.pem -subj "/CN=front-proxy-ca" > /dev/null 2>&1 +fi + gen_key_and_cert() { local name=$1 local subject=$2 @@ -80,6 +89,14 @@ gen_key_and_cert() { openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 36500 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1 } +gen_key_and_cert_front_proxy() { + local name=$1 + local subject=$2 + openssl genrsa -out ${name}-key.pem 2048 > /dev/null 2>&1 + openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1 + openssl x509 -req -in ${name}.csr -CA front-proxy-ca.pem -CAkey front-proxy-ca-key.pem -CAcreateserial -out ${name}.pem -days 36500 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1 +} + # Admins if [ -n "$MASTERS" ]; then @@ -105,7 +122,7 @@ if [ -n "$MASTERS" ]; then # kube-controller-manager gen_key_and_cert "kube-controller-manager" "/CN=system:kube-controller-manager" # metrics aggregator - gen_key_and_cert "front-proxy-client" "/CN=front-proxy-client" + gen_key_and_cert_front_proxy "front-proxy-client" "/CN=front-proxy-client" for host in $MASTERS; do cn="${host%%.*}" diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml index 07820edf7..110ffa898 100644 --- a/roles/kubernetes/secrets/tasks/check-certs.yml +++ b/roles/kubernetes/secrets/tasks/check-certs.yml @@ -48,6 +48,8 @@ '{{ kube_cert_dir }}/kube-scheduler-key.pem', '{{ kube_cert_dir }}/kube-controller-manager.pem', '{{ kube_cert_dir }}/kube-controller-manager-key.pem', + '{{ kube_cert_dir }}/front-proxy-ca.pem', + '{{ kube_cert_dir }}/front-proxy-ca-key.pem', '{{ kube_cert_dir }}/front-proxy-client.pem', '{{ kube_cert_dir }}/front-proxy-client-key.pem', '{{ kube_cert_dir }}/service-account-key.pem', @@ -72,6 +74,7 @@ {% for cert in 
['apiserver.pem', 'apiserver-key.pem', 'kube-scheduler.pem','kube-scheduler-key.pem', 'kube-controller-manager.pem','kube-controller-manager-key.pem', + 'front-proxy-ca.pem','front-proxy-ca-key.pem', 'front-proxy-client.pem','front-proxy-client-key.pem', 'service-account-key.pem'] -%} {% set cert_file = "%s/%s.pem"|format(kube_cert_dir, cert) %} diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index c39f606ad..72ff6b469 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -73,6 +73,8 @@ 'kube-scheduler-key.pem', 'kube-controller-manager.pem', 'kube-controller-manager-key.pem', + 'front-proxy-ca.pem', + 'front-proxy-ca-key.pem', 'front-proxy-client.pem', 'front-proxy-client-key.pem', 'service-account-key.pem', @@ -85,6 +87,8 @@ 'admin-{{ inventory_hostname }}-key.pem', 'apiserver.pem', 'apiserver-key.pem', + 'front-proxy-ca.pem', + 'front-proxy-ca-key.pem', 'front-proxy-client.pem', 'front-proxy-client-key.pem', 'service-account-key.pem', diff --git a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml index cc16b749b..05afdfcf8 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml @@ -98,6 +98,8 @@ - include_tasks: ../../../vault/tasks/shared/issue_cert.yml vars: issue_cert_common_name: "front-proxy-client" + issue_cert_copy_ca: "{{ item == kube_front_proxy_clients_certs_needed|first }}" + issue_cert_ca_filename: front-proxy-ca.pem issue_cert_alt_names: "{{ kube_cert_alt_names }}" issue_cert_file_group: "{{ kube_cert_group }}" issue_cert_file_owner: kube @@ -115,7 +117,7 @@ issue_cert_path: "{{ item }}" issue_cert_role: front-proxy-client issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" - issue_cert_mount_path: "{{ kube_vault_mount_path }}" + issue_cert_mount_path: "{{ 
front_proxy_vault_mount_path }}" with_items: "{{ kube_front_proxy_clients_certs_needed|d([]) }}" when: inventory_hostname in groups['kube-master'] notify: set secret_changed diff --git a/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml b/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml index 02c512a4e..50e1a01e7 100644 --- a/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml +++ b/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml @@ -44,6 +44,18 @@ set_fact: sync_file_results: [] +- include_tasks: ../../../vault/tasks/shared/sync_file.yml + vars: + sync_file: front-proxy-ca.pem + sync_file_dir: "{{ kube_cert_dir }}" + sync_file_group: "{{ kube_cert_group }}" + sync_file_hosts: "{{ groups['kube-master'] }}" + sync_file_owner: kube + +- name: sync_kube_master_certs | Unset sync_file_results after front-proxy-ca.pem + set_fact: + sync_file_results: [] + - include_tasks: ../../../vault/tasks/shared/sync_file.yml vars: sync_file: "{{ item }}" diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml index 8e5ad08a0..f19c73438 100644 --- a/roles/vault/defaults/main.yml +++ b/roles/vault/defaults/main.yml @@ -97,6 +97,11 @@ vault_ca_options: format: pem ttl: "{{ vault_max_lease_ttl }}" exclude_cn_from_sans: true + front_proxy: + common_name: front-proxy + format: pem + ttl: "{{ vault_max_lease_ttl }}" + exclude_cn_from_sans: true vault_client_headers: Accept: "application/json" @@ -164,11 +169,18 @@ vault_pki_mounts: allow_any_name: true enforce_hostnames: false organization: "system:node-proxier" + front_proxy: + name: front-proxy + default_lease_ttl: "{{ vault_default_lease_ttl }}" + max_lease_ttl: "{{ vault_max_lease_ttl }}" + description: "Kubernetes Front Proxy CA" + cert_dir: "{{ vault_kube_cert_dir }}" + roles: - name: front-proxy-client group: k8s-cluster - password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy.creds length=15') }}" + password: "{{ lookup('password', inventory_dir + 
'/credentials/vault/front-proxy-client.creds length=15') }}" policy_rules: default role_options: allow_any_name: true enforce_hostnames: false - organization: "system:front-proxy" + organization: "system:front-proxy" \ No newline at end of file diff --git a/roles/vault/tasks/cluster/create_mounts.yml b/roles/vault/tasks/cluster/create_mounts.yml index c6e075698..087430942 100644 --- a/roles/vault/tasks/cluster/create_mounts.yml +++ b/roles/vault/tasks/cluster/create_mounts.yml @@ -6,8 +6,9 @@ create_mount_max_lease_ttl: "{{ item.max_lease_ttl }}" create_mount_description: "{{ item.description }}" create_mount_cert_dir: "{{ item.cert_dir }}" - create_mount_config_ca_needed: item.name != vault_pki_mounts.kube.name + create_mount_config_ca_needed: item.name != vault_pki_mounts.kube.name and item.name != vault_pki_mounts.front_proxy.name with_items: - "{{ vault_pki_mounts.vault }}" - "{{ vault_pki_mounts.etcd }}" - "{{ vault_pki_mounts.kube }}" + - "{{ vault_pki_mounts.front_proxy }}" diff --git a/roles/vault/tasks/cluster/main.yml b/roles/vault/tasks/cluster/main.yml index 65b9dae9b..7f535d068 100644 --- a/roles/vault/tasks/cluster/main.yml +++ b/roles/vault/tasks/cluster/main.yml @@ -35,6 +35,14 @@ gen_ca_copy_group: "kube-master" when: inventory_hostname in groups.vault +- include_tasks: ../shared/gen_ca.yml + vars: + gen_ca_cert_dir: "{{ vault_pki_mounts.front_proxy.cert_dir }}" + gen_ca_mount_path: "{{ vault_pki_mounts.front_proxy.name }}" + gen_ca_vault_headers: "{{ vault_headers }}" + gen_ca_vault_options: "{{ vault_ca_options.front_proxy }}" + when: inventory_hostname in groups.vault + - include_tasks: ../shared/auth_backend.yml vars: auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates @@ -47,6 +55,7 @@ - "{{ vault_pki_mounts.vault }}" - "{{ vault_pki_mounts.etcd }}" - "{{ vault_pki_mounts.kube }}" + - "{{ vault_pki_mounts.front_proxy }}" loop_control: loop_var: mount when: inventory_hostname in 
groups.vault diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml index 1ba90ea77..36a42efaa 100644 --- a/roles/vault/tasks/shared/issue_cert.yml +++ b/roles/vault/tasks/shared/issue_cert.yml @@ -6,6 +6,7 @@ # issue_cert_alt_name: Requested Subject Alternative Names, in a list. # issue_cert_common_name: Common Name included in the cert # issue_cert_copy_ca: Copy issuing CA cert needed +# issue_cert_ca_filename: Filename for copied issuing CA cert (default ca.pem) # issue_cert_dir_mode: Mode of the placed cert directory # issue_cert_file_group: Group of the placed cert file and directory # issue_cert_file_mode: Mode of the placed cert file @@ -100,7 +101,7 @@ - name: issue_cert | Copy issuing CA cert copy: content: "{{ issue_cert_result['json']['data']['issuing_ca'] }}\n" - dest: "{{ issue_cert_path | dirname }}/ca.pem" + dest: "{{ issue_cert_path | dirname }}/{{ issue_cert_ca_filename | default('ca.pem') }}" group: "{{ issue_cert_file_group | d('root' )}}" mode: "{{ issue_cert_file_mode | d('0644') }}" owner: "{{ issue_cert_file_owner | d('root') }}" From f73717ea35028a385a936d63f86ec7a56256fca5 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Thu, 12 Apr 2018 22:55:13 +0300 Subject: [PATCH 52/82] Mount local volume provisioner dirs for containerized kubelet (#2648) --- .../node/templates/kubelet-container.j2 | 4 ++++ .../node/templates/kubelet.rkt.service.j2 | 16 ++++++++++++++++ roles/kubernetes/preinstall/tasks/main.yml | 14 ++++++++++++++ 3 files changed, 34 insertions(+) diff --git a/roles/kubernetes/node/templates/kubelet-container.j2 b/roles/kubernetes/node/templates/kubelet-container.j2 index 75e07ca27..dcf86c327 100644 --- a/roles/kubernetes/node/templates/kubelet-container.j2 +++ b/roles/kubernetes/node/templates/kubelet-container.j2 @@ -29,6 +29,10 @@ {% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %} -v {{ kubelet_flexvolumes_plugins_dir }}:{{ kubelet_flexvolumes_plugins_dir }}:rw \ {% endif 
-%} + {% if local_volume_provisioner_enabled -%} + -v {{ local_volume_provisioner_base_dir }}:{{ local_volume_provisioner_base_dir }}:rw \ + -v {{ local_volume_provisioner_mount_dir }}:{{ local_volume_provisioner_mount_dir }}:rw \ + {% endif %} -v {{kube_config_dir}}:{{kube_config_dir}}:ro \ -v /etc/os-release:/etc/os-release:ro \ {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \ diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 index 283ce1ad9..13cd9daae 100644 --- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 @@ -43,6 +43,14 @@ ExecStart=/usr/bin/rkt run \ {% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %} --volume flexvolumes,kind=host,source={{ kubelet_flexvolumes_plugins_dir }},readOnly=false \ {% endif %} +{% if local_volume_provisioner_enabled %} + --volume local_volume_provisioner_base_dir,kind=host,source={{ local_volume_provisioner_base_dir }},readOnly=false \ +{# Not pretty, but needed to avoid double mount #} +{% if local_volume_provisioner_base_dir not in local_volume_provisioner_mount_dir and local_volume_provisioner_mount_dir not in local_volume_provisioner_base_dir %} + --volume local_volume_provisioner_mount_dir,kind=host,source={{ local_volume_provisioner_mount_dir }},readOnly=false \ +{% endif %} +{% endif %} + {% if kubelet_load_modules == true %} --mount volume=modprobe,target=/usr/sbin/modprobe \ --mount volume=lib-modules,target=/lib/modules \ @@ -69,6 +77,14 @@ ExecStart=/usr/bin/rkt run \ {% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %} --mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \ {% endif %} +{% if local_volume_provisioner_enabled %} + --mount local_volume_provisioner_base_dir,target={{ local_volume_provisioner_base_dir }} \ +{# Not pretty, but needed to avoid double mount #} +{% if local_volume_provisioner_base_dir not in 
local_volume_provisioner_mount_dir and local_volume_provisioner_mount_dir not in local_volume_provisioner_base_dir %} + --volume local_volume_provisioner_mount_dir,target={{ local_volume_provisioner_mount_dir }} \ +{% endif %} +{% endif %} + --stage1-from-dir=stage1-fly.aci \ {% if kube_hyperkube_image_repo == "docker" %} --insecure-options=image \ diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index cd5dd7acd..e3d56ac0c 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -96,6 +96,20 @@ - contiv - bootstrap-os +- name: Create local volume provisioner directories + file: + path: "{{ item }}" + state: directory + owner: kube + with_items: + - "{{ local_volume_provisioner_base_dir }}" + - "{{ local_volume_provisioner_mount_dir }}" + when: + - inventory_hostname in groups['k8s-cluster'] + - local_volume_provisioner_enabled + tags: + - persistent_volumes + - import_tasks: resolvconf.yml when: - dns_mode != 'none' From afcd5997b91570ad1403874071e38b639c59a402 Mon Sep 17 00:00:00 2001 From: Ganesh Maharaj Mahalingam Date: Thu, 12 Apr 2018 14:24:41 -0700 Subject: [PATCH 53/82] Vagrantfile: Fix default inventory path. Change to support multiple inventory path led to Vagrant environment not getting a default group_vars in it's inventory path. Using sample as the default path if none specified. Fix issue #2541 Signed-off-by: Ganesh Maharaj Mahalingam --- Vagrantfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Vagrantfile b/Vagrantfile index cf174ef77..d0b6b73d1 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -54,7 +54,7 @@ end $box = SUPPORTED_OS[$os][:box] # if $inventory is not set, try to use example -$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory +$inventory = File.join(File.dirname(__FILE__), "inventory", "sample") if ! 
$inventory # if $inventory has a hosts file use it, otherwise copy over vars etc # to where vagrant expects dynamic inventory to be. From e95ba800ea4d38159ddf10e3ff732733d3bbe38c Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Fri, 13 Apr 2018 17:23:10 +0300 Subject: [PATCH 54/82] Define local volume provisioner dirs in defaults (#2656) --- roles/kubespray-defaults/defaults/main.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index d6217d654..1644623a8 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -219,6 +219,10 @@ vault_config_dir: "{{ vault_base_dir }}/config" vault_roles_dir: "{{ vault_base_dir }}/roles" vault_secrets_dir: "{{ vault_base_dir }}/secrets" +# Local volume provisioner dirs +local_volume_provisioner_base_dir: /mnt/disks +local_volume_provisioner_mount_dir: /mnt/disks + ## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. ## See https://github.com/kubernetes-incubator/kubespray/issues/2141 ## Set this variable to true to get rid of this issue From 49e3665d96fbef8b90ec9d01da1ae14f42503263 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Fri, 13 Apr 2018 18:53:39 +0300 Subject: [PATCH 55/82] Remove prometheus operator from Kubespray (#2658) Kubespray should not install any helm charts. This is a task that a user should do on his/her own through ansible or another tool. It opens the door to wrapping installation of any helm chart. 
--- inventory/sample/group_vars/k8s-cluster.yml | 8 +---- roles/kubernetes-apps/meta/main.yml | 6 ---- .../kubernetes-apps/metrics/defaults/main.yml | 9 ------ roles/kubernetes-apps/metrics/tasks/main.yml | 32 ------------------- tests/files/gce_centos7-flannel-addons.yml | 2 -- 5 files changed, 1 insertion(+), 56 deletions(-) delete mode 100644 roles/kubernetes-apps/metrics/defaults/main.yml delete mode 100644 roles/kubernetes-apps/metrics/tasks/main.yml diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index 345d22a36..38d2ce5e5 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -162,15 +162,9 @@ dashboard_enabled: true # Monitoring apps for k8s efk_enabled: false -# Helm deployment. Needs for Prometheus Operator, k8s metrics. +# Helm deployment helm_enabled: false -# Prometheus Operator. Needs for k8s metrics. Installed Helm is required. -prometheus_operator_enabled: false - -# K8s cluster metrics. Installed Helm and Prometheus Operator are required. -k8s_metrics_enabled: false - # Istio deployment istio_enabled: false diff --git a/roles/kubernetes-apps/meta/main.yml b/roles/kubernetes-apps/meta/main.yml index bc05e6f8c..fca51a3b6 100644 --- a/roles/kubernetes-apps/meta/main.yml +++ b/roles/kubernetes-apps/meta/main.yml @@ -27,12 +27,6 @@ dependencies: - apps - registry - - role: kubernetes-apps/metrics - when: prometheus_operator_enabled - tags: - - apps - - metrics - # istio role should be last because it takes a long time to initialize and # will cause timeouts trying to start other addons. - role: kubernetes-apps/istio diff --git a/roles/kubernetes-apps/metrics/defaults/main.yml b/roles/kubernetes-apps/metrics/defaults/main.yml deleted file mode 100644 index 72018e6f5..000000000 --- a/roles/kubernetes-apps/metrics/defaults/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# Prometheus Operator. Needs for k8s metrics. Installed Helm is required. 
-prometheus_operator_enabled: false - -# K8s cluster metrics. Installed Helm and Prometheus Operators are required. -k8s_metrics_enabled: false - -# Separate namespace for monitoring/metrics -monitoring_namespace: "monitoring" diff --git a/roles/kubernetes-apps/metrics/tasks/main.yml b/roles/kubernetes-apps/metrics/tasks/main.yml deleted file mode 100644 index e2280e98b..000000000 --- a/roles/kubernetes-apps/metrics/tasks/main.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -- name: Metrics | Make sure Helm is installed - command: "{{ bin_dir }}/helm version" - register: helm_ready_result - until: helm_ready_result|succeeded - retries: 4 - delay: 5 - when: - - prometheus_operator_enabled - - inventory_hostname == groups['kube-master'][0] - -- name: Metrics | Add coreos repo - command: "{{ bin_dir }}/helm repo add coreos https://s3-eu-west-1.amazonaws.com/coreos-charts/stable/" - when: - - prometheus_operator_enabled - - inventory_hostname == groups['kube-master'][0] - run_once: true - -- name: Metrics | Install Prometheus Operator - command: "{{ bin_dir }}/helm upgrade --install prometheus-operator coreos/prometheus-operator --namespace {{ monitoring_namespace }}" - when: - - prometheus_operator_enabled - - inventory_hostname == groups['kube-master'][0] - run_once: true - -- name: Metrics | Install K8s cluster metrics - command: "{{ bin_dir }}/helm upgrade --install kube-prometheus coreos/kube-prometheus --namespace {{ monitoring_namespace }}" - when: - - prometheus_operator_enabled - - k8s_metrics_enabled - - inventory_hostname == groups['kube-master'][0] - run_once: true diff --git a/tests/files/gce_centos7-flannel-addons.yml b/tests/files/gce_centos7-flannel-addons.yml index c12092011..9e2e1083f 100644 --- a/tests/files/gce_centos7-flannel-addons.yml +++ b/tests/files/gce_centos7-flannel-addons.yml @@ -16,7 +16,5 @@ deploy_netchecker: true kubedns_min_replicas: 1 cloud_provider: gce kube_encrypt_secret_data: true -prometheus_operator_enabled: true -k8s_metrics_enabled: 
true ingress_nginx_enabled: true cert_manager_enabled: true From c432697667f145edc5f42862f71a8414d104bf40 Mon Sep 17 00:00:00 2001 From: Ganesh Maharaj Mahalingam Date: Fri, 13 Apr 2018 10:54:21 -0700 Subject: [PATCH 56/82] Vagrantfile: Add vagrant inventory file in any directory to .gitignore Follow-on fix for #2654 Signed-off-by: Ganesh Maharaj Mahalingam --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 8da099d42..e50e78e22 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ .vagrant *.retry -inventory/vagrant_ansible_inventory +**/vagrant_ansible_inventory inventory/credentials/ inventory/group_vars/fake_hosts.yml inventory/host_vars/ From 02cd5418c22d51e40261775908d55bc562206023 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Sun, 15 Apr 2018 18:32:49 +0300 Subject: [PATCH 57/82] Weave limits (#2660) * Raise limits for weave * Adjust weave limits --- roles/network_plugin/weave/defaults/main.yml | 2 +- tests/files/gce_centos-weave-kubeadm.yml | 2 -- tests/files/gce_coreos-alpha-weave-ha.yml | 2 -- tests/files/gce_rhel7-weave.yml | 2 -- tests/files/gce_ubuntu-weave-sep.yml | 2 -- 5 files changed, 1 insertion(+), 9 deletions(-) diff --git a/roles/network_plugin/weave/defaults/main.yml b/roles/network_plugin/weave/defaults/main.yml index eecb06171..ab955ebef 100644 --- a/roles/network_plugin/weave/defaults/main.yml +++ b/roles/network_plugin/weave/defaults/main.yml @@ -1,7 +1,7 @@ --- # Limits weave_memory_limits: 400M -weave_cpu_limits: 30m +weave_cpu_limits: 300m weave_memory_requests: 64M weave_cpu_requests: 10m diff --git a/tests/files/gce_centos-weave-kubeadm.yml b/tests/files/gce_centos-weave-kubeadm.yml index a1c88e976..a410be3f2 100644 --- a/tests/files/gce_centos-weave-kubeadm.yml +++ b/tests/files/gce_centos-weave-kubeadm.yml @@ -7,8 +7,6 @@ startup_script: "" # Deployment settings kube_network_plugin: weave -weave_cpu_limits: "100m" -weave_cpu_requests: "100m" kubeadm_enabled: true 
deploy_netchecker: true kubedns_min_replicas: 1 diff --git a/tests/files/gce_coreos-alpha-weave-ha.yml b/tests/files/gce_coreos-alpha-weave-ha.yml index 1666e0927..883a67e2a 100644 --- a/tests/files/gce_coreos-alpha-weave-ha.yml +++ b/tests/files/gce_coreos-alpha-weave-ha.yml @@ -7,8 +7,6 @@ startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd' # Deployment settings kube_network_plugin: weave -weave_cpu_limits: "100m" -weave_cpu_requests: "100m" bootstrap_os: coreos resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12 deploy_netchecker: true diff --git a/tests/files/gce_rhel7-weave.yml b/tests/files/gce_rhel7-weave.yml index e6928b7a2..bfff490da 100644 --- a/tests/files/gce_rhel7-weave.yml +++ b/tests/files/gce_rhel7-weave.yml @@ -5,8 +5,6 @@ mode: default # Deployment settings kube_network_plugin: weave -weave_cpu_limits: "100m" -weave_cpu_requests: "100m" deploy_netchecker: true kubedns_min_replicas: 1 cloud_provider: gce diff --git a/tests/files/gce_ubuntu-weave-sep.yml b/tests/files/gce_ubuntu-weave-sep.yml index 6e701cb23..4598672d1 100644 --- a/tests/files/gce_ubuntu-weave-sep.yml +++ b/tests/files/gce_ubuntu-weave-sep.yml @@ -6,8 +6,6 @@ mode: separate # Deployment settings bootstrap_os: ubuntu kube_network_plugin: weave -weave_cpu_limits: "100m" -weave_cpu_requests: "100m" deploy_netchecker: true kubedns_min_replicas: 1 cloud_provider: gce From ea44ad4d75134e5b48c5e4669bb4fcd7482badd8 Mon Sep 17 00:00:00 2001 From: Arslanbekov Denis Date: Mon, 16 Apr 2018 17:29:55 +0300 Subject: [PATCH 58/82] Added img kubernetes-logo.png --- docs/img/kubernetes-logo.png | Bin 0 -> 6954 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 docs/img/kubernetes-logo.png diff --git a/docs/img/kubernetes-logo.png b/docs/img/kubernetes-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..2838a1829ff3588df5e0d8110a62703031be03ee GIT binary patch literal 6954 
zcmZ`;bx;&e*uJAv?r4rakWT5Yqa+0B5ad8YLXnW>=%Y)eLqb4N8fi{A`al$ypf?cB^iVa002cAe~$=kawOhR-wI^1;r zxf28~Y6fZmKwT>NwFBXu&f%!@*Z=?s;sF2<(Ez~Zoe8lC0QiXk0NYOi0QqbHfZ=6+ zo3Y~E1QA?M8+NzkT`TUa$h@-<`|28K5@QKKWK0TlPq^~|0Emb#OwBZ4Zm-~VzUf-) zK=(#Ize0n;e3qu=9Ny2jJkml>UvN)Z4kq_k8f$pv+I5I)*68Yc=~uVL_k#@J6f)w? z+zCK(2$(-1GLpl9-tuX1&ds7qp^v}ISpICz>=Vn*vHhc2DvaDl==b@;jnM62$xHA= zS2MZr2Vn7g_k@=BM#hw_AKYuV7dii33@iQ}EX^9fEX{Bu_kZtaVMc%fD&xNa+90|C z?g$dZFV8)OWIFk7a>n%i`j=5(5<% z;|qGQqC8xW7YY=@Dwr-tcMox zmo9X)3#)6mckmJc;P?Ufb)<^qyA58||BFO$=^EPCG8+|Ri?)0^_UkJm9uZ<@vN2=Z(^WhNzH4tFuZfnGq`0q4!ngEQyIeu@1bun2B z)r{15@(Avh_%Sj(7kQZ(p$#rjcHtuo&V@&pCS0ytzW6vE!4<)k77(%XL9{ZsCRG4I z6vEts7q4=Vw{JyuT~gHF(EXChSl7UXI*UZ!vYj3tJSTia)c zutN_)at5ioR>PzXL75oKmdbSPo7sM1KQQ)T88S~)XuYi=u~I}z*3%le)YTaM7cn-^ z9!nZ)GD%LEt)_Axx|Xgj>hP5+wB(mW-b_uqs=-$k+9bX|fo*T;m^EyDoDuByW?Ck} z`E%byBn{BV+F8vFyL%kTp56%|5Q`!^0#z5CI*%u5cgy|9l!phN@zOo0oP2td zufEP=tZEkjEiSlZp|vIFEpsmnJ{&)sY6?=d^|^a1+UyQoVP>TIwJK%6^h#{GAE!Sn z()+vLT9UGUg4ZnT+LU(QOf6UB`^37jovDMM=n!&p^b&i#z^T-L)v>xlp1)?RXBhw6 zK6x&p))_yJe$Q}M_{0}J5w5*cyF|oe)oZ!XP0;#xz}xW6=0_&s;dQEv!mgUKgDbsY?jM-^}nu9(D@etDfr%*4rGwOw6d zq%TIhtQ^_*G@r=BkwW4FB3`F;eb+M2#HfwQC(AxtWp<-oJMS*oo2Fxu75Njsz2&9R z>po?>{Np7vCG&+i-sbm%9>R z*FD7qJI!dmh0V884m&w(E|lNIyCWpo0n%;C6g)NCR z!S077^CzB8+SlbMs+TFFvXB;v0N}Yut+d7yHg5498 z(u`b(6zxa;Ic_L^5{=me%vLJ+!+N>PB)}xy=tw3V@%b6kuq(-raE2TcUfjY8YHLvO zhE`XGDxKALIZ5)&9MJ7sIr5@w$DQQIyf}qJECYx8W-v-6*tM5$IZj6`UvyL=4xg8k zdBQ*kV0EVcn2&O0E+vz}LaeN38O@*Z<+jI0jb7yW>c7r>*jO=QCS#d&g*f7o_ww(rszJd>eDf?I3~?^lIIcH{A7>4-#}9eCkp0^z z+A7Jf<;*JIEX8+**rPFwd;41I7?mtZ$wT4exLJFmnqo2geP!X$UU#}o11OE~XCzYYTf8MT>K1CS_|B6^omT@K*jL^?62NHHtn}Hi5=4Pp;k1%=#=h>@q zfMQJLgD{wZT+XUo0V4m+iDz3*`AEo=@|gkh`87SAyt)_*fHDJNW7=kwy%}6Z#h$5UfX~R7DU>VJq@iLRk|8v zWC0<-044`@NIh5h+eyte^)b71r6Kc0J)O~U4-Nlz*po8{n7pOVU#vVn=kjN!Ify2D zzHj|j%E^IBy@Mv_Koln0jI1Cyr)nsO#|AUO_iJfxSyowwU}zBdqI;8PDh~7d-y5e` zS#smoc9`)o#vZ4em6xI!;}(=dS#``4XFwTfzm}fCIb}U*X#8qZK;dlI2w=0%iY$yw 
zTz&4>5E;5~x+!97T+?GhHxDbw-$uxm_!JinU6zJTs`$jF3O*7pYc%36_#Diocx=lo z`i1=OvXAE^Yg(Au+f7^XiZmJ?2C+)hf${ee)vjA<^aLRgt@VH^CdlyH!>ZjCP5NJW zk578;lX$Pet&}Y{RjV(jR{4wBz=!s)0>Yp}iNXj_O+l+t>gI8LY?Z&$`%cH;W}=~H z4AJMoF`A0`m?pydhu_V)_kp9xlW{1qz&Y{c& zLz{1f7^`E=4)~jb$c222bl`4Cv$dNa2iuU#cZa(y%^zAeu zgNl0(08O(Xm?E3OByqO>=@ur_=gSW{@=EXu>JejI>#}RWf*w2vGEL=z@85-2Gia`N zy^wWb+R?9@W7|D|-h|!~S2b~&;XJufv-SjXG&!zU@IY`2aDz|=q~mhnt^pr;#G6w) z0vf3+$0eg?QD1Ytrn6Z0(>p{KoTRCFn!l>l5qlTSnoS5qyV-oy;U6RV?zi6U&x}xK zjQU+E4P@HaCz|!Rl+kSdG!rnG*pU6w1kH7yMK+AdtMz^U$QE- z`lopfjC214#$n!t1r2=b z*K!i)4pLX-9-yVrCh1LK2BlAu{<=3!L}kR@E6eLo#{qi|4!=K!J+LY}iroxK$&R(iK7zo;poqL8H_UEb+_>|(2Ap`ZAQ{no88J)tPf-}CAeM(j-B%S@b zk-ggj*I595SG`p>Q*)H#xH^Xu{d!cfw%15Z_pKI6Z8piaz8;CYrll_v_+zl=U1-l* zRwo?$-aX(qB&?>N;gKjdK`sVXlF&DzFw-V8JZY_qU#L9SJ16WQy^AdNqbksHW_Pm} zhx`z)A+X!iywE5>lvM;)JUqJXdRlx>#n@`GK_aTFqo%vRx_d0Wc6uV7k!2@h2PE~K z^ewW&S%A8+3D_1N&JMqR8MV91zL$E(Ny#vHLctZ>H=5S&YyUV|ROSa-F}LWvv2X!- z^@lxSe#x^mou;(pD=+T6;Br5;byi&INAy$ErbH%3MWIjyzMM#~CaOl9|{+X>6qNFX1ZM# zN9dVZzcg`(Gd|*S_hW+&DG&;%QCj^Um5=t_x2NfF|JSm=M=n>t)tpk2QUJQ>^i8w3?Do@P~NmEJph ztYBH#dJv$(FnU_*o^f=$usz02-mMfdh-O0zXAOPNC#T?cSQX61rct^;6Z?mHW|JCj z!|AIMu}{~+Qngz;S?6_|ttL6K4p3hhyO0*6hIBalYpe=F$HzZEWk$xj;ksdf%UAyR zx4f$~O><^jAR{Vf@LqvuUVS>upHl?=owE^6r7!qR^3M(+mt4|YVL|`Z<}+$Zm~HrG ztNGN6R3o-=s{n6t1~&V_S&7iQzu~FY$8PUc(PUD8d@9**+0hZDqi@VMF1PQ>Zc${6 zwiwFUDr@y#EYa%m10 z8G~Q1Aa9q3(&l4ZE^|Gn{V+%^2Gn+Y+~DhT$wEzU;sdUqNQpKgg>A=cS8s{0cGf-T z4qjfZWeW1IF51=1y(r$l@r%yF#xxcDmtMfsv5DmF4V1fYw!w%NTU%mI6redj109i; zH#UUNM>~o4_-Z!5@U~v*;xKe=J-xAl5G*amUwJ6{Ni@N8jJ9lDM9b*q&jT{<71y|n zMFFQDa!i5S;4{OL2)@V=;uYHrhVB%ddkv#EovC8hqHsP)%6SzP2h>rT{{wz9FBpGj zrFo|T{OL>py}yJ6J&{vC$6=IDMSb#jivl1i&btUYOLnq`A9 zB6p6UOItLZ3!(P~#y?3FOL@wrFPNUQgKShD2svvf@p-Pj-bUdda@rCr>s)Lyjb|Uc z1iH%>C2}1uJA!U&aBQD;;8ZOqKX~XdIhzADND4#HFpdh{YSKr(DD!S#7u1O7D(XB4W&1Vvt?zl;VYDUiH zR<~gRr!RYGb}hz%*|I-BHFTSfB)Kq)39h!0tH_WaD+N(tqTRAChpw7jCoB~Q^(^1f zKHP@=r|~Z~{Zh}2mRc=FRo;y5Cwf#&8f_x<|K{1<*T1+AXIt;6@iSWBoNbeRw94pO 
zEv7-Zl!3h2W|`bkmkx7ImM<%y(62S%c)cR^k5t!++{!mHq|^S{xD%Dtb?xTyqKEnf z4vS~YyBXxrsYNoF?WiUtzt;877aK;-#l>j_{(L3FYt(tg=>p=G0YtM%k+G6ODB7`c z`1*HzzMxU86K?InemRmhEg<0+2qvI6P0WiYfZ!aree<;uj`!%~tB);*V14u;r5v;Ei5I2VsUypO8V z*P3L~kAD}<{0SaC71FTPrY}_WU)bj$D)un+P3BDMAL^%murqQ$&Oxvqr-JNFHz1vf z;tumr-KCx-x|rd@P}j`QZ$X3mOI!j+gR&+X45y<8S9sZ>KGOVg(v&@NjbHn!1*65j z7uvQ>@oUD<#Dv6pDM5iw?RjoxS~e*tIk*ICZW^Mv2<}CZk|<~m>Zd#QP-R4Mm62{u zO5Ra;(fhYVUzLCU7wNUwLX=Yp4q7OZUG?EWd5r`e{kk31r1l!&jgr~0u+wRJ>{fgm zU5nX7lLgkhZ;tq~V@Hr%V7$v{k;gBCu7^`kx1d;-TDfi1e@_?oLV@yPcw9Dm#GQnb z7z|%}_U?D|%jMBBY#@w5k-BbuI@^{*bytXet>oWiLBI*m2)Oe3k;>(pb0qTa_9_~ z9=L(|>?UrF8I$ClghW?ohNlNT{{ewX6-6M*WeXpBoM9jF`FqE_1m~r~Sgv z-YAYJm)h_|!lT4Q@t*c%TkF-_t5fN|mYW-*-1$CuO?6eh9XxauUv6sWkNZfDV$W!g zW^%{1*P!lkDl~%pH6T5V_N<_!JBVhe{25;<=lkp_;5%`jag9|rk?jWqU1@)!gg@6# zI6I{~W%A6^2;rMBmDp>6drdMgQf^4R|H-0~L18ho8;m_~@LrISAK{3HK7cU+L|4exM;rs=wmK>2>WxZa$ znQE0iFN?8aLrFv8!Dp~HTo5u2eyrGD9|exWGksI`Hsv;&(c?1n;Is2tT(^=5E15W1 z^D|=$9$32OOR-d;eVBh^0r4R#J24ETTi%_~LmOI@cMt6~Ilucu7HY71Jye_Y{A*yX zi(H9leo?^py`PM>&b42S>ksN${h9vo{GGRRdtoB0eCl0!gvlyBdk7%SwK2wf|CiHB z>FY4>=Y?T3wMO5V(SGK?GnY<|260cJasSNTmv%;SQyrg8zluT?kQI(2q^Kw+1Hpl- zhg)ifSEosYe&}Zt%D_XPdqYk4h@;a}4HSt8j)KD20$Cud`Yj)T;|c7$sZo;cW1-(^ z{ Date: Mon, 16 Apr 2018 17:30:53 +0300 Subject: [PATCH 59/82] Usage kubernetes-logo in README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 094c36bb6..e1852846a 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -![Kubernetes Logo](https://s28.postimg.org/lf3q4ocpp/k8s.png) +![Kubernetes Logo](https://raw.githubusercontent.com/Arslanbekov/kubespray/master/docs/img/kubernetes-logo.png) Deploy a Production Ready Kubernetes Cluster ============================================ From 9f460dd1bfe17b06d7c27256c46cb95a5f31e8d1 Mon Sep 17 00:00:00 2001 From: Arslanbekov Denis Date: Mon, 16 Apr 2018 17:32:00 +0300 Subject: [PATCH 60/82] Change uri --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/README.md b/README.md index e1852846a..dfe8a28f4 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -![Kubernetes Logo](https://raw.githubusercontent.com/Arslanbekov/kubespray/master/docs/img/kubernetes-logo.png) +![Kubernetes Logo](https://raw.githubusercontent.com/Arslanbekov/kubespray/kubernetes-logo/docs/img/kubernetes-logo.png) Deploy a Production Ready Kubernetes Cluster ============================================ From 1bd49ff12558beff6e2668164922f493bb25ea92 Mon Sep 17 00:00:00 2001 From: Arslanbekov Denis Date: Mon, 16 Apr 2018 17:33:24 +0300 Subject: [PATCH 61/82] Add production uri --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index dfe8a28f4..ff241038e 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -![Kubernetes Logo](https://raw.githubusercontent.com/Arslanbekov/kubespray/kubernetes-logo/docs/img/kubernetes-logo.png) +![Kubernetes Logo](https://raw.githubusercontent.com/kubernetes-incubator/kubespray/master/docs/img/kubernetes-logo.png) Deploy a Production Ready Kubernetes Cluster ============================================ From 7968437a65d07d8a3240223f3c6837fd1691d780 Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Tue, 17 Apr 2018 08:51:24 +0800 Subject: [PATCH 62/82] Weave: Upgrade to 2.3.0 --- README.md | 2 +- roles/download/defaults/main.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index ff241038e..45a3515bf 100644 --- a/README.md +++ b/README.md @@ -85,7 +85,7 @@ Versions of supported components - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions) - [cilium](https://github.com/cilium/cilium) v1.0.0-rc8 - [contiv](https://github.com/contiv/install/releases) v1.1.7 -- [weave](http://weave.works/) v2.2.1 +- [weave](http://weave.works/) v2.3.0 - [docker](https://www.docker.com/) v17.03 (see note) - [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2) diff 
--git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 84f78e404..b74ba7ff3 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -38,7 +38,7 @@ flannel_version: "v0.10.0" flannel_cni_version: "v0.3.0" istio_version: "0.2.6" vault_version: 0.8.1 -weave_version: 2.2.1 +weave_version: 2.3.0 pod_infra_version: 3.0 contiv_version: 1.1.7 cilium_version: "v1.0.0-rc8" From 54beb27eaa829d010cd0ee6b8fe7f94d557b8cb4 Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Tue, 17 Apr 2018 12:08:10 +0800 Subject: [PATCH 63/82] cert-manager: Upgrade to v0.2.4 --- roles/download/defaults/main.yml | 2 +- .../cert_manager/templates/cert-manager-certificate-crd.yml.j2 | 2 +- .../templates/cert-manager-clusterissuer-crd.yml.j2 | 2 +- .../cert_manager/templates/cert-manager-clusterrole.yml.j2 | 2 +- .../templates/cert-manager-clusterrolebinding.yml.j2 | 2 +- .../cert_manager/templates/cert-manager-deploy.yml.j2 | 2 +- .../cert_manager/templates/cert-manager-issuer-crd.yml.j2 | 2 +- .../cert_manager/templates/cert-manager-sa.yml.j2 | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 84f78e404..cc472057c 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -158,7 +158,7 @@ ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/ngin ingress_nginx_controller_image_tag: "0.12.0" ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend" ingress_nginx_default_backend_image_tag: "1.4" -cert_manager_version: "v0.2.3" +cert_manager_version: "v0.2.4" cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller" cert_manager_controller_image_tag: "{{ cert_manager_version }}" cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim" diff --git 
a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 index 48d0c5b49..0d27800b3 100644 --- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 @@ -5,7 +5,7 @@ metadata: name: certificates.certmanager.k8s.io labels: app: cert-manager - chart: cert-manager-0.2.5 + chart: cert-manager-0.2.8 release: cert-manager heritage: Tiller spec: diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 index 86601e098..8ac64e35f 100644 --- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 @@ -5,7 +5,7 @@ metadata: name: clusterissuers.certmanager.k8s.io labels: app: cert-manager - chart: cert-manager-0.2.5 + chart: cert-manager-0.2.8 release: cert-manager heritage: Tiller spec: diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 index 9d36de5cb..ce6aa48bf 100644 --- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 @@ -5,7 +5,7 @@ metadata: name: cert-manager labels: app: cert-manager - chart: cert-manager-0.2.5 + chart: cert-manager-0.2.8 release: cert-manager heritage: Tiller rules: diff --git 
a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 index d0e481c6c..d1e26e462 100644 --- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 @@ -5,7 +5,7 @@ metadata: name: cert-manager labels: app: cert-manager - chart: cert-manager-0.2.5 + chart: cert-manager-0.2.8 release: cert-manager heritage: Tiller roleRef: diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 index ef66bef05..7fe98407b 100644 --- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 @@ -6,7 +6,7 @@ metadata: namespace: {{ cert_manager_namespace }} labels: app: cert-manager - chart: cert-manager-0.2.5 + chart: cert-manager-0.2.8 release: cert-manager heritage: Tiller spec: diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 index 7e344d9f9..a11386d10 100644 --- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 @@ -5,7 +5,7 @@ metadata: name: issuers.certmanager.k8s.io labels: app: cert-manager - chart: cert-manager-0.2.5 + chart: cert-manager-0.2.8 release: cert-manager heritage: Tiller spec: diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 
b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 index ccdd5f430..1a67bf6a4 100644 --- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 @@ -6,6 +6,6 @@ metadata: namespace: {{ cert_manager_namespace }} labels: app: cert-manager - chart: cert-manager-0.2.5 + chart: cert-manager-0.2.8 release: cert-manager heritage: Tiller From 23e9737b8575ba8bcd77259108650a2ee126fb12 Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Tue, 17 Apr 2018 12:19:44 +0800 Subject: [PATCH 64/82] ingress-nginx: Upgrade to 0.13.0 --- roles/download/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 84f78e404..feff58d1e 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -155,7 +155,7 @@ local_volume_provisioner_image_tag: "v2.0.0" cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner" cephfs_provisioner_image_tag: "92295a30" ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller" -ingress_nginx_controller_image_tag: "0.12.0" +ingress_nginx_controller_image_tag: "0.13.0" ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend" ingress_nginx_default_backend_image_tag: "1.4" cert_manager_version: "v0.2.3" From d435e176818a5a8fc046a2491cf24705606223a4 Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Tue, 17 Apr 2018 13:41:34 +0800 Subject: [PATCH 65/82] cephfs-provisioner: Upgrade to a71a49d4 --- extra_playbooks/build-cephfs-provisioner.yml | 10 +++++----- roles/download/defaults/main.yml | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/extra_playbooks/build-cephfs-provisioner.yml b/extra_playbooks/build-cephfs-provisioner.yml index 6a72a076e..267c724ee 100644 --- 
a/extra_playbooks/build-cephfs-provisioner.yml +++ b/extra_playbooks/build-cephfs-provisioner.yml @@ -8,8 +8,8 @@ version: "{{ item.version }}" state: "{{ item.state }}" with_items: - - { state: "present", name: "docker", version: "2.7.0" } - - { state: "present", name: "docker-compose", version: "1.18.0" } + - { state: "present", name: "docker", version: "3.2.1" } + - { state: "present", name: "docker-compose", version: "1.21.0" } - name: CephFS Provisioner | Check Go version shell: | @@ -36,18 +36,18 @@ git: repo: https://github.com/kubernetes-incubator/external-storage.git dest: "~/go/src/github.com/kubernetes-incubator" - version: 92295a30 + version: a71a49d4 clone: no update: yes - name: CephFS Provisioner | Build image shell: | cd ~/go/src/github.com/kubernetes-incubator/external-storage - REGISTRY=quay.io/kubespray/ VERSION=92295a30 make ceph/cephfs + REGISTRY=quay.io/kubespray/ VERSION=a71a49d4 make ceph/cephfs - name: CephFS Provisioner | Push image docker_image: - name: quay.io/kubespray/cephfs-provisioner:92295a30 + name: quay.io/kubespray/cephfs-provisioner:a71a49d4 push: yes retries: 10 diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 84f78e404..a7c582d58 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -153,7 +153,7 @@ registry_proxy_image_tag: "0.4" local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner" local_volume_provisioner_image_tag: "v2.0.0" cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner" -cephfs_provisioner_image_tag: "92295a30" +cephfs_provisioner_image_tag: "a71a49d4" ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller" ingress_nginx_controller_image_tag: "0.12.0" ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend" From 756af57787dbf651c89a12f97d7870c97c8bd11f Mon Sep 17 00:00:00 2001 From: woopstar Date: Wed, 18 Apr 2018 10:15:40 
+0200 Subject: [PATCH 66/82] Properly check need_pip, always run pip to check if needed pip was always being downloaded on subsequent runs, This PR always runs the pip command, and checks the rc of it before downloading pip Fix in favor of #2582 --- roles/bootstrap-os/tasks/bootstrap-coreos.yml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/roles/bootstrap-os/tasks/bootstrap-coreos.yml b/roles/bootstrap-os/tasks/bootstrap-coreos.yml index 428065eba..be0030538 100644 --- a/roles/bootstrap-os/tasks/bootstrap-coreos.yml +++ b/roles/bootstrap-os/tasks/bootstrap-coreos.yml @@ -22,7 +22,6 @@ failed_when: false changed_when: false check_mode: no - when: need_bootstrap.rc != 0 tags: - facts @@ -30,24 +29,24 @@ copy: src: get-pip.py dest: ~/get-pip.py - when: need_pip != 0 + when: need_pip.rc != 0 - name: Bootstrap | Install pip shell: "{{ansible_python_interpreter}} ~/get-pip.py" - when: need_pip != 0 + when: need_pip.rc != 0 - name: Bootstrap | Remove get-pip.py file: path: ~/get-pip.py state: absent - when: need_pip != 0 + when: need_pip.rc != 0 - name: Bootstrap | Install pip launcher copy: src: runner dest: /opt/bin/pip mode: 0755 - when: need_pip != 0 + when: need_pip.rc != 0 - name: Install required python modules pip: From 296b92dbd47b5b54fb94cc6ef4fced2823f9e7d8 Mon Sep 17 00:00:00 2001 From: Samuel Vandamme Date: Thu, 12 Apr 2018 11:41:36 +0200 Subject: [PATCH 67/82] Replaced 'mem' with 'memory' in elasticsearch and kibana deployment --- .../elasticsearch/templates/elasticsearch-deployment.yml.j2 | 4 ++-- .../efk/kibana/templates/kibana-deployment.yml.j2 | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 index ee2eb8b21..4cdcf33ad 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 +++ 
b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 @@ -30,12 +30,12 @@ spec: limits: cpu: {{ elasticsearch_cpu_limit }} {% if elasticsearch_mem_limit is defined and elasticsearch_mem_limit != "0M" %} - mem: {{ elasticsearch_mem_limit }} + memory: "{{ elasticsearch_mem_limit }}" {% endif %} requests: cpu: {{ elasticsearch_cpu_requests }} {% if elasticsearch_mem_requests is defined and elasticsearch_mem_requests != "0M" %} - mem: {{ elasticsearch_mem_requests }} + memory: "{{ elasticsearch_mem_requests }}" {% endif %} ports: - containerPort: 9200 diff --git a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2 b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2 index 4fdf54c04..c5603d389 100644 --- a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2 +++ b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2 @@ -26,12 +26,12 @@ spec: limits: cpu: {{ kibana_cpu_limit }} {% if kibana_mem_limit is defined and kibana_mem_limit != "0M" %} - mem: {{ kibana_mem_limit }} + memory: "{{ kibana_mem_limit }}" {% endif %} requests: cpu: {{ kibana_cpu_requests }} {% if kibana_mem_requests is defined and kibana_mem_requests != "0M" %} - mem: {{ kibana_mem_requests }} + memory: "{{ kibana_mem_requests }}" {% endif %} env: - name: "ELASTICSEARCH_URL" From 49c6bf8fa616dd1a320648cd02a9029c6a81f3b9 Mon Sep 17 00:00:00 2001 From: Spencer Smith Date: Wed, 18 Apr 2018 13:16:42 -0400 Subject: [PATCH 68/82] support custom env vars for etcd --- roles/etcd/defaults/main.yml | 6 ++++++ roles/etcd/templates/etcd.env.j2 | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml index 6c13810c5..7b7a1fc5a 100644 --- a/roles/etcd/defaults/main.yml +++ b/roles/etcd/defaults/main.yml @@ -31,6 +31,12 @@ etcd_election_timeout: "5000" etcd_metrics: "basic" +## A dictionary of extra environment variables to add to etcd.env, formatted like: +## 
etcd_extra_vars: +## ETCD_VAR1: "value1" +## ETCD_VAR2: "value2" +etcd_extra_vars: {} + # Limits # Limit memory only if <4GB memory on host. 0=unlimited etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif %}" diff --git a/roles/etcd/templates/etcd.env.j2 b/roles/etcd/templates/etcd.env.j2 index 178366d00..c18fb4132 100644 --- a/roles/etcd/templates/etcd.env.j2 +++ b/roles/etcd/templates/etcd.env.j2 @@ -27,3 +27,7 @@ ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem ETCD_PEER_CLIENT_CERT_AUTH={{ etcd_peer_client_auth }} + +{% for key, value in etcd_extra_vars.iteritems() %} +{{ key }}={{ value }} +{% endfor %} From 0945eb990a138d418f33855139e6e96d2abca639 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Thu, 19 Apr 2018 16:47:20 +0300 Subject: [PATCH 69/82] Make it possible to skip docker role as a var (#2686) --- cluster.yml | 2 +- roles/kubespray-defaults/defaults/main.yaml | 4 ++++ scale.yml | 2 +- upgrade-cluster.yml | 2 +- 4 files changed, 7 insertions(+), 3 deletions(-) diff --git a/cluster.yml b/cluster.yml index fb7dec4cb..b4a48985f 100644 --- a/cluster.yml +++ b/cluster.yml @@ -33,7 +33,7 @@ roles: - { role: kubespray-defaults} - { role: kubernetes/preinstall, tags: preinstall } - - { role: docker, tags: docker } + - { role: docker, tags: docker, when: manage_docker|default(true) } - role: rkt tags: rkt when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]" diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 82df06f21..f297c007e 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -129,6 +129,10 @@ kube_apiserver_insecure_port: 8080 # Aggregator kube_api_aggregator_routing: false +# Docker options +# Optionally do not run 
docker role +manage_docker: true + # Path used to store Docker data docker_daemon_graph: "/var/lib/docker" diff --git a/scale.yml b/scale.yml index bcf6c69b0..3f8613011 100644 --- a/scale.yml +++ b/scale.yml @@ -28,7 +28,7 @@ roles: - { role: kubespray-defaults} - { role: kubernetes/preinstall, tags: preinstall } - - { role: docker, tags: docker } + - { role: docker, tags: docker, when: manage_docker|default(true) } - role: rkt tags: rkt when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]" diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml index 7acec3083..9e858acd3 100644 --- a/upgrade-cluster.yml +++ b/upgrade-cluster.yml @@ -34,7 +34,7 @@ roles: - { role: kubespray-defaults} - { role: kubernetes/preinstall, tags: preinstall } - - { role: docker, tags: docker } + - { role: docker, tags: docker, when: manage_docker|default(true) } - role: rkt tags: rkt when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]" From a49e06b54bbec00c5c4cc27e4cd5562de9d3bb97 Mon Sep 17 00:00:00 2001 From: oz123 Date: Thu, 19 Apr 2018 15:46:42 +0200 Subject: [PATCH 70/82] Document how to allow ipip traffic with calico on OpenStack --- docs/calico.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/calico.md b/docs/calico.md index 7992e57eb..b8cdc90cb 100644 --- a/docs/calico.md +++ b/docs/calico.md @@ -169,3 +169,12 @@ By default the felix agent(calico-node) will abort if the Kernel RPF setting is ``` calico_node_ignorelooserpf: true ``` + +Note that in OpenStack you must allow `ipip` traffic in your security groups, +otherwise you will experience timeouts. 
+To do this you must add a rule which allows it, for example: + +``` +neutron security-group-rule-create --protocol 4 --direction egress k8s-a0tp4t +neutron security-group-rule-create --protocol 4 --direction ingress k8s-a0tp4t +``` From 75950344fb53c8d7189279d83f37995746ae9686 Mon Sep 17 00:00:00 2001 From: Paul Montero Date: Thu, 19 Apr 2018 11:38:13 -0500 Subject: [PATCH 71/82] run_once pre_upgrade tasks which are executing in localhost --- roles/kubernetes/preinstall/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index e3d56ac0c..8df0ff9ee 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -5,6 +5,7 @@ # This is run before bin_dir is pinned because these tasks are run on localhost - import_tasks: pre_upgrade.yml + run_once: true tags: - upgrade From f81e6d2ccf96f831d5b54c41d5790c44b2b78910 Mon Sep 17 00:00:00 2001 From: Suzuka Asagiri Date: Mon, 23 Apr 2018 12:17:00 +0900 Subject: [PATCH 72/82] Add oidc-user-prefix and oidc-group-prefix args --- inventory/sample/group_vars/k8s-cluster.yml | 2 ++ roles/kubernetes/master/defaults/main.yml | 2 ++ .../master/templates/manifests/kube-apiserver.manifest.j2 | 6 ++++++ 3 files changed, 10 insertions(+) diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index 38d2ce5e5..13a7ddff5 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -58,7 +58,9 @@ kube_users: ## Optional settings for OIDC # kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem # kube_oidc_username_claim: sub +# kube_oidc_username_prefix: oidc: # kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: oidc: # Choose network plugin (cilium, calico, contiv, weave or flannel) diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 
c2715df85..52b04be50 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -73,7 +73,9 @@ kube_oidc_auth: false ## Optional settings for OIDC # kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem # kube_oidc_username_claim: sub +# kube_oidc_username_prefix: oidc: # kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: oidc: ## Variables for custom flags apiserver_custom_flags: [] diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index e0054686a..b589a9176 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -73,9 +73,15 @@ spec: {% if kube_oidc_username_claim is defined %} - --oidc-username-claim={{ kube_oidc_username_claim }} {% endif %} +{% if kube_oidc_username_prefix is defined %} + - "--oidc-username-prefix={{ kube_oidc_username_prefix }}" +{% endif %} {% if kube_oidc_groups_claim is defined %} - --oidc-groups-claim={{ kube_oidc_groups_claim }} {% endif %} +{% if kube_oidc_groups_prefix is defined %} + - "--oidc-groups-prefix={{ kube_oidc_groups_prefix }}" +{% endif %} {% endif %} - --secure-port={{ kube_apiserver_port }} - --insecure-port={{ kube_apiserver_insecure_port }} From 51f4e6585a2000bd226c42ffde813639e4154ac6 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Mon, 23 Apr 2018 14:28:24 +0300 Subject: [PATCH 73/82] Revert "Add openSUSE support" (#2697) --- .gitlab-ci.yml | 15 ------------- README.md | 2 -- Vagrantfile | 7 ++---- docs/opensuse.md | 19 ---------------- .../bootstrap-os/tasks/bootstrap-opensuse.yml | 7 ------ roles/bootstrap-os/tasks/main.yml | 22 +++++-------------- roles/docker/tasks/main.yml | 22 ++++--------------- roles/docker/templates/docker.service.j2 | 6 ----- roles/docker/vars/suse.yml | 15 ------------- roles/etcd/tasks/upd_ca_trust.yml | 6 ++--- 
roles/kubernetes/preinstall/defaults/main.yml | 2 +- roles/kubernetes/preinstall/tasks/main.yml | 9 -------- .../preinstall/tasks/verify-settings.yml | 4 ++-- roles/kubernetes/preinstall/vars/suse.yml | 4 ---- .../kubernetes/secrets/tasks/upd_ca_trust.yml | 6 ++--- roles/rkt/tasks/install.yml | 17 +++++++++++--- roles/rkt/vars/suse.yml | 2 -- tests/files/gce_opensuse-canal.yml | 12 ---------- 18 files changed, 33 insertions(+), 144 deletions(-) delete mode 100644 docs/opensuse.md delete mode 100644 roles/bootstrap-os/tasks/bootstrap-opensuse.yml delete mode 100644 roles/docker/vars/suse.yml delete mode 100644 roles/kubernetes/preinstall/vars/suse.yml delete mode 100644 roles/rkt/vars/suse.yml delete mode 100644 tests/files/gce_opensuse-canal.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e03e64017..6a1eef6ab 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -308,10 +308,6 @@ before_script: # stage: deploy-special MOVED_TO_GROUP_VARS: "true" -.opensuse_canal_variables: &opensuse_canal_variables -# stage: deploy-part2 - MOVED_TO_GROUP_VARS: "true" - # Builds for PRs only (premoderated by unit-tests step) and triggers (auto) ### PR JOBS PART1 @@ -593,17 +589,6 @@ gce_centos7-calico-ha-triggers: when: on_success only: ['triggers'] -gce_opensuse-canal: - stage: deploy-part2 - <<: *job - <<: *gce - variables: - <<: *gce_variables - <<: *opensuse_canal_variables - when: manual - except: ['triggers'] - only: ['master', /^pr-.*$/] - # no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613 gce_coreos-alpha-weave-ha: stage: deploy-special diff --git a/README.md b/README.md index 45a3515bf..081c28fe6 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,6 @@ Documents - [Vagrant install](docs/vagrant.md) - [CoreOS bootstrap](docs/coreos.md) - [Debian Jessie setup](docs/debian.md) -- [openSUSE setup](docs/opensuse.md) - [Downloaded artifacts](docs/downloads.md) - [Cloud providers](docs/cloud.md) - [OpenStack](docs/openstack.md) @@ -71,7 +70,6 @@ 
Supported Linux Distributions - **Ubuntu** 16.04 - **CentOS/RHEL** 7 - **Fedora/CentOS** Atomic -- **openSUSE** Leap 42.3/Tumbleweed Note: Upstart/SysV init based OS types are not supported. diff --git a/Vagrantfile b/Vagrantfile index d0b6b73d1..720e2419f 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -18,8 +18,6 @@ SUPPORTED_OS = { "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]}, "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"}, "centos" => {box: "centos/7", bootstrap_os: "centos", user: "vagrant"}, - "opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", bootstrap_os: "opensuse", use: "vagrant"}, - "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", bootstrap_os: "opensuse", use: "vagrant"}, } # Defaults for config options defined in CONFIG @@ -86,6 +84,7 @@ Vagrant.configure("2") do |config| if Vagrant.has_plugin?("vagrant-vbguest") then config.vbguest.auto_update = false end + (1..$num_instances).each do |i| config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config| config.vm.hostname = vm_name @@ -111,10 +110,8 @@ Vagrant.configure("2") do |config| end end - config.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] - $shared_folders.each do |src, dst| - config.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] + config.vm.synced_folder src, dst end config.vm.provider :virtualbox do |vb| diff --git a/docs/opensuse.md b/docs/opensuse.md deleted file mode 100644 index 88fac3790..000000000 --- a/docs/opensuse.md +++ /dev/null @@ -1,19 +0,0 @@ -openSUSE Leap 42.3 and Tumbleweed -=============== - -openSUSE Leap installation Notes: - -- Install Ansible - - ``` - sudo zypper ref - sudo zypper -n install ansible - - ``` - -- Install Jinja2 and Python-Netaddr - - ```sudo zypper -n install python-Jinja2 python-netaddr``` - - -Now you 
can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment) diff --git a/roles/bootstrap-os/tasks/bootstrap-opensuse.yml b/roles/bootstrap-os/tasks/bootstrap-opensuse.yml deleted file mode 100644 index abedd2195..000000000 --- a/roles/bootstrap-os/tasks/bootstrap-opensuse.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- name: Install required packages (SUSE) - package: - name: "{{ item }}" - state: present - with_items: - - python-cryptography diff --git a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml index c921b643e..01031deeb 100644 --- a/roles/bootstrap-os/tasks/main.yml +++ b/roles/bootstrap-os/tasks/main.yml @@ -11,9 +11,6 @@ - import_tasks: bootstrap-centos.yml when: bootstrap_os == "centos" -- import_tasks: bootstrap-opensuse.yml - when: bootstrap_os == "opensuse" - - import_tasks: setup-pipelining.yml - name: check if atomic host @@ -29,25 +26,18 @@ gather_subset: '!all' filter: ansible_* -- name: Assign inventory name to unconfigured hostnames (non-CoreOS and Tumbleweed) +- name: Assign inventory name to unconfigured hostnames (non-CoreOS) hostname: name: "{{inventory_hostname}}" - when: - - override_system_hostname - - ansible_distribution not in ['openSUSE Tumbleweed'] - - ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS'] + when: ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS'] and override_system_hostname -- name: Assign inventory name to unconfigured hostnames (CoreOS and Tumbleweed only) +- name: Assign inventory name to unconfigured hostnames (CoreOS only) command: "hostnamectl set-hostname {{inventory_hostname}}" register: hostname_changed - when: - - ansible_hostname == 'localhost' - - ansible_distribution in ['openSUSE Tumbleweed'] or ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] - - override_system_hostname + when: ansible_hostname == 'localhost' and ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and override_system_hostname -- name: 
Update hostname fact (CoreOS and Tumbleweed only) +- name: Update hostname fact (CoreOS only) setup: gather_subset: '!all' filter: ansible_hostname - when: - - hostname_changed.changed + when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and hostname_changed.changed diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 3668f61b8..729397b44 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -15,14 +15,6 @@ tags: - facts -# https://yum.dockerproject.org/repo/main/opensuse/ contains packages for an EOL -# openSUSE version so we can't use it. The only alternative is to use the docker -# packages from the distribution repositories. -- name: Warn about Docker version on SUSE - debug: - msg: "SUSE distributions always install Docker from the distro repos" - when: ansible_pkg_mgr == 'zypper' - - include_tasks: set_facts_dns.yml when: dns_mode != 'none' and resolvconf_mode == 'docker_dns' tags: @@ -51,7 +43,7 @@ retries: 4 delay: "{{ retry_stagger | random + 3 }}" with_items: "{{ docker_repo_key_info.repo_keys }}" - when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) - name: ensure docker-ce repository is enabled action: "{{ docker_repo_info.pkg_repo }}" @@ -59,7 +51,7 @@ repo: "{{item}}" state: present with_items: "{{ docker_repo_info.repos }}" - when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (docker_repo_info.repos|length > 0) + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (docker_repo_info.repos|length > 0) - name: ensure docker-engine repository public key is installed action: "{{ dockerproject_repo_key_info.pkg_key }}" @@ -72,7 +64,7 @@ retries: 4 delay: "{{ retry_stagger | random + 3 }}" with_items: "{{ dockerproject_repo_key_info.repo_keys 
}}" - when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) - name: ensure docker-engine repository is enabled action: "{{ dockerproject_repo_info.pkg_repo }}" @@ -80,7 +72,7 @@ repo: "{{item}}" state: present with_items: "{{ dockerproject_repo_info.repos }}" - when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (dockerproject_repo_info.repos|length > 0) + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (dockerproject_repo_info.repos|length > 0) - name: Configure docker repository on RedHat/CentOS template: @@ -118,12 +110,6 @@ notify: restart docker when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_package_info.pkgs|length > 0) -- name: ensure service is started if docker packages are already present - service: - name: docker - state: started - when: docker_task_result is not changed - - name: flush handlers so we can wait for docker to come up meta: flush_handlers diff --git a/roles/docker/templates/docker.service.j2 b/roles/docker/templates/docker.service.j2 index 8dc82bbb2..d8efe2025 100644 --- a/roles/docker/templates/docker.service.j2 +++ b/roles/docker/templates/docker.service.j2 @@ -7,9 +7,6 @@ Wants=docker-storage-setup.service {% elif ansible_os_family == "Debian" %} After=network.target docker.socket Wants=docker.socket -{% elif ansible_os_family == "Suse" %} -After=network.target containerd.socket containerd.service -Requires=containerd.socket containerd.service {% endif %} [Service] @@ -22,9 +19,6 @@ ExecReload=/bin/kill -s HUP $MAINPID Delegate=yes KillMode=process ExecStart={{ docker_bin_dir }}/docker{% if installed_docker_version.stdout|version_compare('17.03', '<') %} daemon{% else %}d{% endif %} \ -{% if ansible_os_family == "Suse" %} - --containerd 
/run/containerd/containerd.sock --add-runtime oci=/usr/bin/docker-runc \ -{% endif %} $DOCKER_OPTS \ $DOCKER_STORAGE_OPTIONS \ $DOCKER_NETWORK_OPTIONS \ diff --git a/roles/docker/vars/suse.yml b/roles/docker/vars/suse.yml deleted file mode 100644 index d89a50a7f..000000000 --- a/roles/docker/vars/suse.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -docker_kernel_min_version: '0' - -docker_package_info: - pkg_mgr: zypper - pkgs: - - name: docker - -docker_repo_key_info: - pkg_key: '' - repo_keys: [] - -docker_repo_info: - pkg_repo: '' - repos: [] diff --git a/roles/etcd/tasks/upd_ca_trust.yml b/roles/etcd/tasks/upd_ca_trust.yml index 0ff363860..dd36554fb 100644 --- a/roles/etcd/tasks/upd_ca_trust.yml +++ b/roles/etcd/tasks/upd_ca_trust.yml @@ -8,8 +8,6 @@ /etc/pki/ca-trust/source/anchors/etcd-ca.crt {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} /etc/ssl/certs/etcd-ca.pem - {%- elif ansible_os_family == "Suse" -%} - /etc/pki/trust/anchors/etcd-ca.pem {%- endif %} tags: - facts @@ -21,9 +19,9 @@ remote_src: true register: etcd_ca_cert -- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS) +- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS) command: update-ca-certificates - when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"] + when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"] - name: Gen_certs | update ca-certificates (RedHat) command: update-ca-trust extract diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml index 3bf847fb9..149cbb42a 100644 --- a/roles/kubernetes/preinstall/defaults/main.yml +++ b/roles/kubernetes/preinstall/defaults/main.yml @@ -8,7 +8,7 @@ epel_enabled: false common_required_pkgs: - python-httplib2 - - "{{ (ansible_distribution == 'openSUSE Tumbleweed') | ternary('openssl-1_1_0', 'openssl') }}" + 
- openssl - curl - rsync - bash-completion diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index 8df0ff9ee..8b9cbbe1a 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -158,15 +158,6 @@ - not is_atomic tags: bootstrap-os -- name: Update package management cache (zypper) - SUSE - shell: zypper -n --gpg-auto-import-keys ref - register: make_cache_output - until: make_cache_output|succeeded - retries: 4 - delay: "{{ retry_stagger | random + 3 }}" - when: - - ansible_pkg_mgr == 'zypper' - tags: bootstrap-os - name: Update package management cache (APT) apt: diff --git a/roles/kubernetes/preinstall/tasks/verify-settings.yml b/roles/kubernetes/preinstall/tasks/verify-settings.yml index 5f647101d..8f0a2e854 100644 --- a/roles/kubernetes/preinstall/tasks/verify-settings.yml +++ b/roles/kubernetes/preinstall/tasks/verify-settings.yml @@ -12,7 +12,7 @@ - name: Stop if unknown OS assert: - that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'CoreOS', 'Container Linux by CoreOS', 'openSUSE Leap', 'openSUSE Tumbleweed'] + that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'CoreOS', 'Container Linux by CoreOS'] ignore_errors: "{{ ignore_assert_errors }}" - name: Stop if unknown network plugin @@ -94,4 +94,4 @@ assert: that: ansible_kernel.split('-')[0]|version_compare('4.8', '>=') when: kube_network_plugin == 'cilium' - ignore_errors: "{{ ignore_assert_errors }}" + ignore_errors: "{{ ignore_assert_errors }}" \ No newline at end of file diff --git a/roles/kubernetes/preinstall/vars/suse.yml b/roles/kubernetes/preinstall/vars/suse.yml deleted file mode 100644 index 3f4f9aee9..000000000 --- a/roles/kubernetes/preinstall/vars/suse.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -required_pkgs: - - device-mapper - - ebtables diff --git a/roles/kubernetes/secrets/tasks/upd_ca_trust.yml 
b/roles/kubernetes/secrets/tasks/upd_ca_trust.yml index cdd5f48fa..eec44987f 100644 --- a/roles/kubernetes/secrets/tasks/upd_ca_trust.yml +++ b/roles/kubernetes/secrets/tasks/upd_ca_trust.yml @@ -8,8 +8,6 @@ /etc/pki/ca-trust/source/anchors/kube-ca.crt {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} /etc/ssl/certs/kube-ca.pem - {%- elif ansible_os_family == "Suse" -%} - /etc/pki/trust/anchors/kube-ca.pem {%- endif %} tags: - facts @@ -21,9 +19,9 @@ remote_src: true register: kube_ca_cert -- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS) +- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS) command: update-ca-certificates - when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"] + when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"] - name: Gen_certs | update ca-certificates (RedHat) command: update-ca-trust extract diff --git a/roles/rkt/tasks/install.yml b/roles/rkt/tasks/install.yml index cbaaf4085..599f9e50e 100644 --- a/roles/rkt/tasks/install.yml +++ b/roles/rkt/tasks/install.yml @@ -15,11 +15,22 @@ tags: - facts -- name: install rkt pkg - package: - name: "{{ rkt_download_url }}/{{ rkt_pkg_name }}" +- name: install rkt pkg on ubuntu + apt: + deb: "{{ rkt_download_url }}/{{ rkt_pkg_name }}" state: present register: rkt_task_result until: rkt_task_result|succeeded retries: 4 delay: "{{ retry_stagger | random + 3 }}" + when: ansible_os_family == "Debian" + +- name: install rkt pkg on centos + yum: + pkg: "{{ rkt_download_url }}/{{ rkt_pkg_name }}" + state: present + register: rkt_task_result + until: rkt_task_result|succeeded + retries: 4 + delay: "{{ retry_stagger | random + 3 }}" + when: ansible_os_family == "RedHat" diff --git a/roles/rkt/vars/suse.yml b/roles/rkt/vars/suse.yml deleted file mode 100644 index 13149e8fb..000000000 --- a/roles/rkt/vars/suse.yml 
+++ /dev/null @@ -1,2 +0,0 @@ ---- -rkt_pkg_name: "rkt-{{ rkt_pkg_version }}.x86_64.rpm" diff --git a/tests/files/gce_opensuse-canal.yml b/tests/files/gce_opensuse-canal.yml deleted file mode 100644 index 9eae57e2e..000000000 --- a/tests/files/gce_opensuse-canal.yml +++ /dev/null @@ -1,12 +0,0 @@ -# Instance settings -cloud_image_family: opensuse-leap -cloud_region: us-central1-c -mode: default - -# Deployment settings -bootstrap_os: opensuse -kube_network_plugin: canal -kubeadm_enabled: true -deploy_netchecker: true -kubedns_min_replicas: 1 -cloud_provider: gce From 44cb126e7dbcbd3c9dea26ac07f827ca737e4df2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20de=20Saint=20Martin?= Date: Tue, 24 Apr 2018 09:03:36 +0200 Subject: [PATCH 74/82] Update netchecker to v1.2.2. Using official image from mirantis at dockerhub. --- docs/netcheck.md | 4 ++-- roles/download/defaults/main.yml | 6 +++--- .../ansible/templates/netchecker-server-clusterrole.yml.j2 | 3 +++ 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/docs/netcheck.md b/docs/netcheck.md index 80679cd73..638e5cfce 100644 --- a/docs/netcheck.md +++ b/docs/netcheck.md @@ -25,8 +25,8 @@ There are related application specifc variables: netchecker_port: 31081 agent_report_interval: 15 netcheck_namespace: default -agent_img: "quay.io/l23network/k8s-netchecker-agent:v1.0" -server_img: "quay.io/l23network/k8s-netchecker-server:v1.0" +agent_img: "mirantis/k8s-netchecker-agent:v1.2.2" +server_img: "mirantis/k8s-netchecker-server:v1.2.2" ``` Note that the application verifies DNS resolve for FQDNs comprising only the diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 3fac21b71..a326440e6 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -92,10 +92,10 @@ pod_infra_image_repo: "gcr.io/google_containers/pause-amd64" pod_infra_image_tag: "{{ pod_infra_version }}" install_socat_image_repo: "xueshanf/install-socat" install_socat_image_tag: 
"latest" -netcheck_version: "v1.0" -netcheck_agent_img_repo: "quay.io/l23network/k8s-netchecker-agent" +netcheck_version: "v1.2.2" +netcheck_agent_img_repo: "mirantis/k8s-netchecker-agent" netcheck_agent_tag: "{{ netcheck_version }}" -netcheck_server_img_repo: "quay.io/l23network/k8s-netchecker-server" +netcheck_server_img_repo: "mirantis/k8s-netchecker-server" netcheck_server_tag: "{{ netcheck_version }}" weave_kube_image_repo: "weaveworks/weave-kube" weave_kube_image_tag: "{{ weave_version }}" diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 index 7a8c1d273..19bdc8b1f 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 @@ -7,3 +7,6 @@ rules: - apiGroups: [""] resources: ["pods"] verbs: ["list"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ['*'] From 1a14f1ecc1e9061edde8c5a555c2fb6e277264ad Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Tue, 24 Apr 2018 20:32:08 +0300 Subject: [PATCH 75/82] Fix vol format for local volume provisioner in rkt (#2698) --- .../node/templates/kubelet.rkt.service.j2 | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 index 13cd9daae..b53102539 100644 --- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 @@ -42,15 +42,14 @@ ExecStart=/usr/bin/rkt run \ {# surely there's a better way to do this #} {% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %} --volume flexvolumes,kind=host,source={{ kubelet_flexvolumes_plugins_dir }},readOnly=false \ -{% endif %} +{% endif -%} {% if local_volume_provisioner_enabled %} - --volume 
local_volume_provisioner_base_dir,kind=host,source={{ local_volume_provisioner_base_dir }},readOnly=false \ + --volume local-volume-provisioner-base-dir,kind=host,source={{ local_volume_provisioner_base_dir }},readOnly=false \ {# Not pretty, but needed to avoid double mount #} {% if local_volume_provisioner_base_dir not in local_volume_provisioner_mount_dir and local_volume_provisioner_mount_dir not in local_volume_provisioner_base_dir %} - --volume local_volume_provisioner_mount_dir,kind=host,source={{ local_volume_provisioner_mount_dir }},readOnly=false \ + --volume local-volume-provisioner-mount-dir,kind=host,source={{ local_volume_provisioner_mount_dir }},readOnly=false \ {% endif %} {% endif %} - {% if kubelet_load_modules == true %} --mount volume=modprobe,target=/usr/sbin/modprobe \ --mount volume=lib-modules,target=/lib/modules \ @@ -76,15 +75,14 @@ ExecStart=/usr/bin/rkt run \ {# surely there's a better way to do this #} {% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %} --mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \ -{% endif %} +{% endif -%} {% if local_volume_provisioner_enabled %} - --mount local_volume_provisioner_base_dir,target={{ local_volume_provisioner_base_dir }} \ + --mount volume=local-volume-provisioner-base-dir,target={{ local_volume_provisioner_base_dir }} \ {# Not pretty, but needed to avoid double mount #} {% if local_volume_provisioner_base_dir not in local_volume_provisioner_mount_dir and local_volume_provisioner_mount_dir not in local_volume_provisioner_base_dir %} - --volume local_volume_provisioner_mount_dir,target={{ local_volume_provisioner_mount_dir }} \ + --mount volume=local-volume-provisioner-mount-dir,target={{ local_volume_provisioner_mount_dir }} \ {% endif %} {% endif %} - --stage1-from-dir=stage1-fly.aci \ {% if kube_hyperkube_image_repo == "docker" %} --insecure-options=image \ From 9168c71359579ceee89ace587a42092e08f1e93c Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 
26 Apr 2018 10:52:06 +0100 Subject: [PATCH 76/82] Revert "Revert "Add openSUSE support" (#2697)" (#2699) This reverts commit 51f4e6585a2000bd226c42ffde813639e4154ac6. --- .gitlab-ci.yml | 15 +++++++++++++ README.md | 2 ++ Vagrantfile | 7 ++++-- docs/opensuse.md | 19 ++++++++++++++++ .../bootstrap-os/tasks/bootstrap-opensuse.yml | 7 ++++++ roles/bootstrap-os/tasks/main.yml | 22 ++++++++++++++----- roles/docker/tasks/main.yml | 22 +++++++++++++++---- roles/docker/templates/docker.service.j2 | 6 +++++ roles/docker/vars/suse.yml | 15 +++++++++++++ roles/etcd/tasks/upd_ca_trust.yml | 6 +++-- roles/kubernetes/preinstall/defaults/main.yml | 2 +- roles/kubernetes/preinstall/tasks/main.yml | 9 ++++++++ .../preinstall/tasks/verify-settings.yml | 4 ++-- roles/kubernetes/preinstall/vars/suse.yml | 4 ++++ .../kubernetes/secrets/tasks/upd_ca_trust.yml | 6 +++-- roles/rkt/tasks/install.yml | 10 +++++++++ roles/rkt/vars/suse.yml | 2 ++ tests/files/gce_opensuse-canal.yml | 12 ++++++++++ 18 files changed, 151 insertions(+), 19 deletions(-) create mode 100644 docs/opensuse.md create mode 100644 roles/bootstrap-os/tasks/bootstrap-opensuse.yml create mode 100644 roles/docker/vars/suse.yml create mode 100644 roles/kubernetes/preinstall/vars/suse.yml create mode 100644 roles/rkt/vars/suse.yml create mode 100644 tests/files/gce_opensuse-canal.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6a1eef6ab..e03e64017 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -308,6 +308,10 @@ before_script: # stage: deploy-special MOVED_TO_GROUP_VARS: "true" +.opensuse_canal_variables: &opensuse_canal_variables +# stage: deploy-part2 + MOVED_TO_GROUP_VARS: "true" + # Builds for PRs only (premoderated by unit-tests step) and triggers (auto) ### PR JOBS PART1 @@ -589,6 +593,17 @@ gce_centos7-calico-ha-triggers: when: on_success only: ['triggers'] +gce_opensuse-canal: + stage: deploy-part2 + <<: *job + <<: *gce + variables: + <<: *gce_variables + <<: *opensuse_canal_variables + when: manual + 
except: ['triggers'] + only: ['master', /^pr-.*$/] + # no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613 gce_coreos-alpha-weave-ha: stage: deploy-special diff --git a/README.md b/README.md index 081c28fe6..45a3515bf 100644 --- a/README.md +++ b/README.md @@ -52,6 +52,7 @@ Documents - [Vagrant install](docs/vagrant.md) - [CoreOS bootstrap](docs/coreos.md) - [Debian Jessie setup](docs/debian.md) +- [openSUSE setup](docs/opensuse.md) - [Downloaded artifacts](docs/downloads.md) - [Cloud providers](docs/cloud.md) - [OpenStack](docs/openstack.md) @@ -70,6 +71,7 @@ Supported Linux Distributions - **Ubuntu** 16.04 - **CentOS/RHEL** 7 - **Fedora/CentOS** Atomic +- **openSUSE** Leap 42.3/Tumbleweed Note: Upstart/SysV init based OS types are not supported. diff --git a/Vagrantfile b/Vagrantfile index 720e2419f..d0b6b73d1 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -18,6 +18,8 @@ SUPPORTED_OS = { "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]}, "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"}, "centos" => {box: "centos/7", bootstrap_os: "centos", user: "vagrant"}, + "opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", bootstrap_os: "opensuse", use: "vagrant"}, + "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", bootstrap_os: "opensuse", use: "vagrant"}, } # Defaults for config options defined in CONFIG @@ -84,7 +86,6 @@ Vagrant.configure("2") do |config| if Vagrant.has_plugin?("vagrant-vbguest") then config.vbguest.auto_update = false end - (1..$num_instances).each do |i| config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config| config.vm.hostname = vm_name @@ -110,8 +111,10 @@ Vagrant.configure("2") do |config| end end + config.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] + $shared_folders.each do |src, dst| - config.vm.synced_folder src, dst + 
config.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] end config.vm.provider :virtualbox do |vb| diff --git a/docs/opensuse.md b/docs/opensuse.md new file mode 100644 index 000000000..88fac3790 --- /dev/null +++ b/docs/opensuse.md @@ -0,0 +1,19 @@ +openSUSE Leap 42.3 and Tumbleweed +=============== + +openSUSE Leap installation Notes: + +- Install Ansible + + ``` + sudo zypper ref + sudo zypper -n install ansible + + ``` + +- Install Jinja2 and Python-Netaddr + + ```sudo zypper -n install python-Jinja2 python-netaddr``` + + +Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment) diff --git a/roles/bootstrap-os/tasks/bootstrap-opensuse.yml b/roles/bootstrap-os/tasks/bootstrap-opensuse.yml new file mode 100644 index 000000000..abedd2195 --- /dev/null +++ b/roles/bootstrap-os/tasks/bootstrap-opensuse.yml @@ -0,0 +1,7 @@ +--- +- name: Install required packages (SUSE) + package: + name: "{{ item }}" + state: present + with_items: + - python-cryptography diff --git a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml index 01031deeb..c921b643e 100644 --- a/roles/bootstrap-os/tasks/main.yml +++ b/roles/bootstrap-os/tasks/main.yml @@ -11,6 +11,9 @@ - import_tasks: bootstrap-centos.yml when: bootstrap_os == "centos" +- import_tasks: bootstrap-opensuse.yml + when: bootstrap_os == "opensuse" + - import_tasks: setup-pipelining.yml - name: check if atomic host @@ -26,18 +29,25 @@ gather_subset: '!all' filter: ansible_* -- name: Assign inventory name to unconfigured hostnames (non-CoreOS) +- name: Assign inventory name to unconfigured hostnames (non-CoreOS and Tumbleweed) hostname: name: "{{inventory_hostname}}" - when: ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS'] and override_system_hostname + when: + - override_system_hostname + - ansible_distribution not in ['openSUSE Tumbleweed'] + - ansible_os_family not in ['CoreOS', 'Container Linux by 
CoreOS'] -- name: Assign inventory name to unconfigured hostnames (CoreOS only) +- name: Assign inventory name to unconfigured hostnames (CoreOS and Tumbleweed only) command: "hostnamectl set-hostname {{inventory_hostname}}" register: hostname_changed - when: ansible_hostname == 'localhost' and ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and override_system_hostname + when: + - ansible_hostname == 'localhost' + - ansible_distribution in ['openSUSE Tumbleweed'] or ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] + - override_system_hostname -- name: Update hostname fact (CoreOS only) +- name: Update hostname fact (CoreOS and Tumbleweed only) setup: gather_subset: '!all' filter: ansible_hostname - when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and hostname_changed.changed + when: + - hostname_changed.changed diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 729397b44..3668f61b8 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -15,6 +15,14 @@ tags: - facts +# https://yum.dockerproject.org/repo/main/opensuse/ contains packages for an EOL +# openSUSE version so we can't use it. The only alternative is to use the docker +# packages from the distribution repositories. 
+- name: Warn about Docker version on SUSE + debug: + msg: "SUSE distributions always install Docker from the distro repos" + when: ansible_pkg_mgr == 'zypper' + - include_tasks: set_facts_dns.yml when: dns_mode != 'none' and resolvconf_mode == 'docker_dns' tags: @@ -43,7 +51,7 @@ retries: 4 delay: "{{ retry_stagger | random + 3 }}" with_items: "{{ docker_repo_key_info.repo_keys }}" - when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) - name: ensure docker-ce repository is enabled action: "{{ docker_repo_info.pkg_repo }}" @@ -51,7 +59,7 @@ repo: "{{item}}" state: present with_items: "{{ docker_repo_info.repos }}" - when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (docker_repo_info.repos|length > 0) + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (docker_repo_info.repos|length > 0) - name: ensure docker-engine repository public key is installed action: "{{ dockerproject_repo_key_info.pkg_key }}" @@ -64,7 +72,7 @@ retries: 4 delay: "{{ retry_stagger | random + 3 }}" with_items: "{{ dockerproject_repo_key_info.repo_keys }}" - when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) - name: ensure docker-engine repository is enabled action: "{{ dockerproject_repo_info.pkg_repo }}" @@ -72,7 +80,7 @@ repo: "{{item}}" state: present with_items: "{{ dockerproject_repo_info.repos }}" - when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (dockerproject_repo_info.repos|length > 0) + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (dockerproject_repo_info.repos|length > 
0) - name: Configure docker repository on RedHat/CentOS template: @@ -110,6 +118,12 @@ notify: restart docker when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_package_info.pkgs|length > 0) +- name: ensure service is started if docker packages are already present + service: + name: docker + state: started + when: docker_task_result is not changed + - name: flush handlers so we can wait for docker to come up meta: flush_handlers diff --git a/roles/docker/templates/docker.service.j2 b/roles/docker/templates/docker.service.j2 index d8efe2025..8dc82bbb2 100644 --- a/roles/docker/templates/docker.service.j2 +++ b/roles/docker/templates/docker.service.j2 @@ -7,6 +7,9 @@ Wants=docker-storage-setup.service {% elif ansible_os_family == "Debian" %} After=network.target docker.socket Wants=docker.socket +{% elif ansible_os_family == "Suse" %} +After=network.target containerd.socket containerd.service +Requires=containerd.socket containerd.service {% endif %} [Service] @@ -19,6 +22,9 @@ ExecReload=/bin/kill -s HUP $MAINPID Delegate=yes KillMode=process ExecStart={{ docker_bin_dir }}/docker{% if installed_docker_version.stdout|version_compare('17.03', '<') %} daemon{% else %}d{% endif %} \ +{% if ansible_os_family == "Suse" %} + --containerd /run/containerd/containerd.sock --add-runtime oci=/usr/bin/docker-runc \ +{% endif %} $DOCKER_OPTS \ $DOCKER_STORAGE_OPTIONS \ $DOCKER_NETWORK_OPTIONS \ diff --git a/roles/docker/vars/suse.yml b/roles/docker/vars/suse.yml new file mode 100644 index 000000000..d89a50a7f --- /dev/null +++ b/roles/docker/vars/suse.yml @@ -0,0 +1,15 @@ +--- +docker_kernel_min_version: '0' + +docker_package_info: + pkg_mgr: zypper + pkgs: + - name: docker + +docker_repo_key_info: + pkg_key: '' + repo_keys: [] + +docker_repo_info: + pkg_repo: '' + repos: [] diff --git a/roles/etcd/tasks/upd_ca_trust.yml b/roles/etcd/tasks/upd_ca_trust.yml index dd36554fb..0ff363860 100644 --- a/roles/etcd/tasks/upd_ca_trust.yml +++ 
b/roles/etcd/tasks/upd_ca_trust.yml @@ -8,6 +8,8 @@ /etc/pki/ca-trust/source/anchors/etcd-ca.crt {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} /etc/ssl/certs/etcd-ca.pem + {%- elif ansible_os_family == "Suse" -%} + /etc/pki/trust/anchors/etcd-ca.pem {%- endif %} tags: - facts @@ -19,9 +21,9 @@ remote_src: true register: etcd_ca_cert -- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS) +- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS) command: update-ca-certificates - when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"] + when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"] - name: Gen_certs | update ca-certificates (RedHat) command: update-ca-trust extract diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml index 149cbb42a..3bf847fb9 100644 --- a/roles/kubernetes/preinstall/defaults/main.yml +++ b/roles/kubernetes/preinstall/defaults/main.yml @@ -8,7 +8,7 @@ epel_enabled: false common_required_pkgs: - python-httplib2 - - openssl + - "{{ (ansible_distribution == 'openSUSE Tumbleweed') | ternary('openssl-1_1_0', 'openssl') }}" - curl - rsync - bash-completion diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index 8b9cbbe1a..8df0ff9ee 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -158,6 +158,15 @@ - not is_atomic tags: bootstrap-os +- name: Update package management cache (zypper) - SUSE + shell: zypper -n --gpg-auto-import-keys ref + register: make_cache_output + until: make_cache_output|succeeded + retries: 4 + delay: "{{ retry_stagger | random + 3 }}" + when: + - ansible_pkg_mgr == 'zypper' + tags: bootstrap-os - name: Update package management cache (APT) apt: diff --git 
a/roles/kubernetes/preinstall/tasks/verify-settings.yml b/roles/kubernetes/preinstall/tasks/verify-settings.yml index 8f0a2e854..5f647101d 100644 --- a/roles/kubernetes/preinstall/tasks/verify-settings.yml +++ b/roles/kubernetes/preinstall/tasks/verify-settings.yml @@ -12,7 +12,7 @@ - name: Stop if unknown OS assert: - that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'CoreOS', 'Container Linux by CoreOS'] + that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'CoreOS', 'Container Linux by CoreOS', 'openSUSE Leap', 'openSUSE Tumbleweed'] ignore_errors: "{{ ignore_assert_errors }}" - name: Stop if unknown network plugin @@ -94,4 +94,4 @@ assert: that: ansible_kernel.split('-')[0]|version_compare('4.8', '>=') when: kube_network_plugin == 'cilium' - ignore_errors: "{{ ignore_assert_errors }}" \ No newline at end of file + ignore_errors: "{{ ignore_assert_errors }}" diff --git a/roles/kubernetes/preinstall/vars/suse.yml b/roles/kubernetes/preinstall/vars/suse.yml new file mode 100644 index 000000000..3f4f9aee9 --- /dev/null +++ b/roles/kubernetes/preinstall/vars/suse.yml @@ -0,0 +1,4 @@ +--- +required_pkgs: + - device-mapper + - ebtables diff --git a/roles/kubernetes/secrets/tasks/upd_ca_trust.yml b/roles/kubernetes/secrets/tasks/upd_ca_trust.yml index eec44987f..cdd5f48fa 100644 --- a/roles/kubernetes/secrets/tasks/upd_ca_trust.yml +++ b/roles/kubernetes/secrets/tasks/upd_ca_trust.yml @@ -8,6 +8,8 @@ /etc/pki/ca-trust/source/anchors/kube-ca.crt {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} /etc/ssl/certs/kube-ca.pem + {%- elif ansible_os_family == "Suse" -%} + /etc/pki/trust/anchors/kube-ca.pem {%- endif %} tags: - facts @@ -19,9 +21,9 @@ remote_src: true register: kube_ca_cert -- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS) +- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS) command: update-ca-certificates 
- when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"] + when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"] - name: Gen_certs | update ca-certificates (RedHat) command: update-ca-trust extract diff --git a/roles/rkt/tasks/install.yml b/roles/rkt/tasks/install.yml index 599f9e50e..f881a81fe 100644 --- a/roles/rkt/tasks/install.yml +++ b/roles/rkt/tasks/install.yml @@ -34,3 +34,13 @@ retries: 4 delay: "{{ retry_stagger | random + 3 }}" when: ansible_os_family == "RedHat" + +- name: install rkt pkg on openSUSE + zypper: + name: "{{ rkt_download_url }}/{{ rkt_pkg_name }}" + state: present + register: rkt_task_result + until: rkt_task_result|succeeded + retries: 4 + delay: "{{ retry_stagger | random + 3 }}" + when: ansible_os_family == "Suse" diff --git a/roles/rkt/vars/suse.yml b/roles/rkt/vars/suse.yml new file mode 100644 index 000000000..13149e8fb --- /dev/null +++ b/roles/rkt/vars/suse.yml @@ -0,0 +1,2 @@ +--- +rkt_pkg_name: "rkt-{{ rkt_pkg_version }}.x86_64.rpm" diff --git a/tests/files/gce_opensuse-canal.yml b/tests/files/gce_opensuse-canal.yml new file mode 100644 index 000000000..9eae57e2e --- /dev/null +++ b/tests/files/gce_opensuse-canal.yml @@ -0,0 +1,12 @@ +# Instance settings +cloud_image_family: opensuse-leap +cloud_region: us-central1-c +mode: default + +# Deployment settings +bootstrap_os: opensuse +kube_network_plugin: canal +kubeadm_enabled: true +deploy_netchecker: true +kubedns_min_replicas: 1 +cloud_provider: gce From c3c5817af6095624a6de0e9731628de0217a7637 Mon Sep 17 00:00:00 2001 From: mirwan Date: Fri, 27 Apr 2018 17:50:58 +0200 Subject: [PATCH 77/82] sysctl file should be in defaults so that it can be overriden (#2475) * sysctl file should be in defaults so that it can be overriden * Change sysctl_file_path to be consistent with roles/kubernetes/preinstall/defaults/main.yml --- roles/kubernetes/node/defaults/main.yml | 2 ++ 
roles/kubernetes/node/tasks/main.yml | 2 ++ roles/kubernetes/preinstall/defaults/main.yml | 2 ++ roles/kubernetes/preinstall/tasks/main.yml | 6 ------ 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index e1a1f1777..35a364d21 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -93,6 +93,8 @@ kube_cadvisor_port: 0 # The read-only port for the Kubelet to serve on with no authentication/authorization. kube_read_only_port: 0 +# sysctl_file_path to add sysctl conf to +sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" # For the openstack integration kubelet will need credentials to access # openstack apis like nova and cinder. Per default this values will be diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index 13cc0740d..f7520caf8 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -61,6 +61,7 @@ name: net.ipv4.ip_local_reserved_ports value: "{{ kube_apiserver_node_port_range }}" sysctl_set: yes + sysctl_file: "{{ sysctl_file_path }}" state: present reload: yes when: kube_apiserver_node_port_range is defined @@ -96,6 +97,7 @@ sysctl: name: "{{ item }}" state: present + sysctl_file: "{{ sysctl_file_path }}" value: 1 reload: yes when: sysctl_bridge_nf_call_iptables.rc == 0 diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml index 3bf847fb9..4e4b892b1 100644 --- a/roles/kubernetes/preinstall/defaults/main.yml +++ b/roles/kubernetes/preinstall/defaults/main.yml @@ -31,3 +31,5 @@ resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf populate_inventory_to_hosts_file: true preinstall_selinux_state: permissive + +sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index 8df0ff9ee..0a994e8e2 
100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -245,12 +245,6 @@ tags: - bootstrap-os -- name: set default sysctl file path - set_fact: - sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" - tags: - - bootstrap-os - - name: Stat sysctl file configuration stat: path: "{{sysctl_file_path}}" From 06cdb260f62d994aaa23bc60a7ffa12e9451117a Mon Sep 17 00:00:00 2001 From: mirwan Date: Sun, 29 Apr 2018 18:02:14 +0200 Subject: [PATCH 78/82] labelvalue must be formatted to handle non string values (#2722) --- roles/kubernetes/node/templates/kubelet.standard.env.j2 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2 index 31a72f518..19100c1a7 100644 --- a/roles/kubernetes/node/templates/kubelet.standard.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2 @@ -95,9 +95,9 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% endif %} {% set inventory_node_labels = [] %} {% if node_labels is defined %} -{% for labelname, labelvalue in node_labels.iteritems() %} -{% set dummy = inventory_node_labels.append(labelname + '=' + labelvalue) %} -{% endfor %} +{% for labelname, labelvalue in node_labels.iteritems() %} +{% set dummy = inventory_node_labels.append('%s=%s'|format(labelname, labelvalue)) %} +{% endfor %} {% endif %} {% set all_node_labels = role_node_labels + inventory_node_labels %} From 59789ae02a1eb31d5d7e6411c8b7d5735d1e8482 Mon Sep 17 00:00:00 2001 From: Tomasz Majchrowski <34726569+towasz@users.noreply.github.com> Date: Mon, 30 Apr 2018 13:48:17 +0200 Subject: [PATCH 79/82] ISSUE-2706: Provide consistent usage of supplementary_addresses_in_ssl_keys across vault and script mode (#2707) --- roles/kubernetes/secrets/tasks/gen_certs_vault.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git 
a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml index 05afdfcf8..8c9d12384 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml @@ -52,6 +52,11 @@ "{{ hostvars[host]['ip'] }}", {%- endif -%} {%- endfor -%} + {%- if supplementary_addresses_in_ssl_keys is defined -%} + {%- for ip_item in supplementary_addresses_in_ssl_keys -%} + "{{ ip_item }}", + {%- endfor -%} + {%- endif -%} "127.0.0.1","::1","{{ kube_apiserver_ip }}" ] issue_cert_path: "{{ item }}" @@ -112,6 +117,11 @@ "{{ hostvars[host]['ip'] }}", {%- endif -%} {%- endfor -%} + {%- if supplementary_addresses_in_ssl_keys is defined -%} + {%- for ip_item in supplementary_addresses_in_ssl_keys -%} + "{{ ip_item }}", + {%- endfor -%} + {%- endif -%} "127.0.0.1","::1","{{ kube_apiserver_ip }}" ] issue_cert_path: "{{ item }}" From df6c5b28a10ed8fbe1f5780ea501dd540b494529 Mon Sep 17 00:00:00 2001 From: Pablo Moreno Date: Mon, 30 Apr 2018 16:11:07 +0100 Subject: [PATCH 80/82] [contrib/terraform/openstack] Backward compatibility changes (#2539) * [terraform/openstack] Restores ability to use existing public nodes and masters as bastion. * [terraform/openstack] Uses network_id as output * [terraform/openstack] Fixes link to inventory/local/group_vars * [terraform/openstack] Adds supplementary master groups * [terraform/openstack] Updates documentation avoiding manual setups for bastion (as they are not needed now). * [terraform/openstack] Supplementary master groups in docs. * [terraform/openstack] Fixes repeated usage of master fips instead of bastion fips * [terraform/openstack] Missing change for network_id to subnet_id * [terraform/openstack] Changes conditional to element( concat ) form to avoid type issues with empty lists. 
--- contrib/terraform/group_vars | 2 +- contrib/terraform/openstack/README.md | 15 ++++++++---- contrib/terraform/openstack/kubespray.tf | 1 + .../openstack/modules/compute/main.tf | 23 +++++++++++++++---- .../openstack/modules/compute/variables.tf | 4 ++++ contrib/terraform/openstack/variables.tf | 5 ++++ 6 files changed, 39 insertions(+), 11 deletions(-) diff --git a/contrib/terraform/group_vars b/contrib/terraform/group_vars index febd29cb3..4dd828e8e 120000 --- a/contrib/terraform/group_vars +++ b/contrib/terraform/group_vars @@ -1 +1 @@ -../../inventory/group_vars \ No newline at end of file +../../inventory/local/group_vars \ No newline at end of file diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md index ed11bef1e..de717fb69 100644 --- a/contrib/terraform/openstack/README.md +++ b/contrib/terraform/openstack/README.md @@ -135,7 +135,7 @@ the one you want to use with the environment variable `OS_CLOUD`: export OS_CLOUD=mycloud ``` -##### Openrc method (deprecated) +##### Openrc method When using classic environment variables, Terraform uses default `OS_*` environment variables. A script suitable for your environment may be available @@ -218,6 +218,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tf`. |`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one | |`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. | | `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks | +|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube-node` for tainting them as nodes, empty by default. 
| #### Terraform state files @@ -299,11 +300,15 @@ If you have deployed and destroyed a previous iteration of your cluster, you wil #### Bastion host -If you are not using a bastion host, but not all of your nodes have floating IPs, create a file `inventory/$CLUSTER/group_vars/no-floating.yml` with the following content. Use one of your nodes with a floating IP (this should have been output at the end of the Terraform step) and the appropriate user for that OS, or if you have another jump host, use that. +Bastion access will be determined by: -``` -ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@MASTER_IP"' -``` + - Your choice on the amount of bastion hosts (set by `number_of_bastions` terraform variable). + - The existence of nodes/masters with floating IPs (set by `number_of_k8s_masters`, `number_of_k8s_nodes`, `number_of_k8s_masters_no_etcd` terraform variables). + +If you have a bastion host, your ssh traffic will be directly routed through it. This is regardless of whether you have masters/nodes with a floating IP assigned. +If you don't have a bastion host, but at least one of your masters/nodes have a floating IP, then ssh traffic will be tunneled by one of these machines. + +So, either a bastion host, or at least master/node with a floating IP are required. 
#### Test access diff --git a/contrib/terraform/openstack/kubespray.tf b/contrib/terraform/openstack/kubespray.tf index e0dbfd02d..c501302de 100644 --- a/contrib/terraform/openstack/kubespray.tf +++ b/contrib/terraform/openstack/kubespray.tf @@ -48,6 +48,7 @@ module "compute" { k8s_master_fips = "${module.ips.k8s_master_fips}" k8s_node_fips = "${module.ips.k8s_node_fips}" bastion_fips = "${module.ips.bastion_fips}" + supplementary_master_groups = "${var.supplementary_master_groups}" network_id = "${module.network.router_id}" } diff --git a/contrib/terraform/openstack/modules/compute/main.tf b/contrib/terraform/openstack/modules/compute/main.tf index e0a8eab4a..940049aa9 100644 --- a/contrib/terraform/openstack/modules/compute/main.tf +++ b/contrib/terraform/openstack/modules/compute/main.tf @@ -83,7 +83,7 @@ resource "openstack_compute_instance_v2" "bastion" { } provisioner "local-exec" { - command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/openstack/group_vars/no-floating.yml" + command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/group_vars/no-floating.yml" } } @@ -107,10 +107,14 @@ resource "openstack_compute_instance_v2" "k8s_master" { metadata = { ssh_user = "${var.ssh_user}" - kubespray_groups = "etcd,kube-master,k8s-cluster,vault" + kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault" depends_on = "${var.network_id}" } + provisioner "local-exec" { + command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml" + } + } resource "openstack_compute_instance_v2" "k8s_master_no_etcd" { @@ -125,15 +129,20 @@ resource 
"openstack_compute_instance_v2" "k8s_master_no_etcd" { } security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}", + "${openstack_compute_secgroup_v2.bastion.name}", "${openstack_compute_secgroup_v2.k8s.name}", ] metadata = { ssh_user = "${var.ssh_user}" - kubespray_groups = "kube-master,k8s-cluster,vault" + kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault" depends_on = "${var.network_id}" } + provisioner "local-exec" { + command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml" + } + } resource "openstack_compute_instance_v2" "etcd" { @@ -175,7 +184,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" { metadata = { ssh_user = "${var.ssh_user}" - kubespray_groups = "etcd,kube-master,k8s-cluster,vault,no-floating" + kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating" depends_on = "${var.network_id}" } @@ -198,7 +207,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" { metadata = { ssh_user = "${var.ssh_user}" - kubespray_groups = "kube-master,k8s-cluster,vault,no-floating" + kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating" depends_on = "${var.network_id}" } @@ -226,6 +235,10 @@ resource "openstack_compute_instance_v2" "k8s_node" { depends_on = "${var.network_id}" } + provisioner "local-exec" { + command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml" + } + } resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" { diff --git a/contrib/terraform/openstack/modules/compute/variables.tf 
b/contrib/terraform/openstack/modules/compute/variables.tf index 518e15069..58ab17067 100644 --- a/contrib/terraform/openstack/modules/compute/variables.tf +++ b/contrib/terraform/openstack/modules/compute/variables.tf @@ -55,3 +55,7 @@ variable "k8s_node_fips" { variable "bastion_fips" { type = "list" } + +variable "supplementary_master_groups" { + default = "" +} diff --git a/contrib/terraform/openstack/variables.tf b/contrib/terraform/openstack/variables.tf index 925750ab1..d49746c92 100644 --- a/contrib/terraform/openstack/variables.tf +++ b/contrib/terraform/openstack/variables.tf @@ -111,3 +111,8 @@ variable "floatingip_pool" { variable "external_net" { description = "uuid of the external/public network" } + +variable "supplementary_master_groups" { + description = "supplementary kubespray ansible groups for masters, such as kube-node" + default = "" +} From 3501eb691615e86a7fe91a62fa265c2221c4cabe Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Tue, 1 May 2018 15:42:07 +0800 Subject: [PATCH 81/82] ingress-nginx: Upgrade to 0.14.0 --- roles/download/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index a326440e6..bcdd7295f 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -155,7 +155,7 @@ local_volume_provisioner_image_tag: "v2.0.0" cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner" cephfs_provisioner_image_tag: "a71a49d4" ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller" -ingress_nginx_controller_image_tag: "0.13.0" +ingress_nginx_controller_image_tag: "0.14.0" ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend" ingress_nginx_default_backend_image_tag: "1.4" cert_manager_version: "v0.2.4" From 0fb017b9c10d978a4e826fb56a1ce2740658fd19 Mon Sep 17 00:00:00 2001 From: Giri Kuncoro Date: Wed, 2 May 2018 14:07:05
+0700 Subject: [PATCH 82/82] Rename ansible user env vars --- contrib/terraform/aws/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/terraform/aws/README.md b/contrib/terraform/aws/README.md index d69811335..f62ba44cd 100644 --- a/contrib/terraform/aws/README.md +++ b/contrib/terraform/aws/README.md @@ -46,7 +46,7 @@ ssh -F ./ssh-bastion.conf user@$ip Example (this one assumes you are using CoreOS) ```commandline -ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache +ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache ``` ***Using other distrib than CoreOs*** If you want to use another distribution than CoreOS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.