From af8cc4dc4a9d39c84c11606388541d4ba3dcda74 Mon Sep 17 00:00:00 2001 From: TAKAHASHI Yuto Date: Mon, 8 May 2017 22:55:34 +0900 Subject: [PATCH 01/36] Typo --- library/kube.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/kube.py b/library/kube.py index 2922c6212..fdc783fff 100644 --- a/library/kube.py +++ b/library/kube.py @@ -66,7 +66,7 @@ options: description: - present handles checking existence or creating if definition file provided, absent handles deleting resource(s) based on other options, - latest handles creating ore updating based on existence, + latest handles creating or updating based on existence, reloaded handles updating resource(s) definition using definition file, stopped handles stopping resource(s) based on other options. requirements: From 266ca9318d9ad735b0ac7ad95362b548caa57a54 Mon Sep 17 00:00:00 2001 From: Gregory Storme Date: Tue, 6 Jun 2017 18:36:04 +0200 Subject: [PATCH 02/36] Use the kube_apiserver_insecure_port variable instead of static 8080 --- roles/kubernetes-apps/ansible/tasks/main.yml | 2 +- roles/kubernetes/master/handlers/main.yml | 2 +- .../master/templates/manifests/kube-apiserver.manifest.j2 | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 9ec3b7ddc..4e7236df6 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: Kubernetes Apps | Wait for kube-apiserver uri: - url: http://localhost:8080/healthz + url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz register: result until: result.status == 200 retries: 10 diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml index 94cec7d1b..e408ce04e 100644 --- a/roles/kubernetes/master/handlers/main.yml +++ b/roles/kubernetes/master/handlers/main.yml @@ -39,7 +39,7 @@ - name: Master | wait for the apiserver to be running uri: - url: http://localhost:8080/healthz + url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz register: result until: result.status == 200 retries: 20 diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 1cee58282..bf4979596 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -92,7 +92,7 @@ spec: httpGet: host: 127.0.0.1 path: /healthz - port: 8080 + port: {{ kube_apiserver_insecure_port }} initialDelaySeconds: 30 timeoutSeconds: 10 volumeMounts: @@ -124,4 +124,4 @@ spec: - hostPath: path: /etc/ssl/certs/ca-bundle.crt name: rhel-ca-bundle -{% endif %} \ No newline at end of file +{% endif %} From fff0aec720e4dc6f5ee93f5fea90d7272c0e688a Mon Sep 17 00:00:00 2001 From: Gregory Storme Date: Wed, 14 Jun 2017 10:39:38 +0200 Subject: [PATCH 03/36] add configurable parameter for etcd_auto_compaction_retention --- inventory/group_vars/all.yml | 3 +++ roles/etcd/defaults/main.yml | 2 ++ roles/etcd/templates/etcd.env.yml | 1 + 3 files changed, 6 insertions(+) diff --git a/inventory/group_vars/all.yml b/inventory/group_vars/all.yml index a30055367..21b81a6de 100644 --- a/inventory/group_vars/all.yml +++ b/inventory/group_vars/all.yml @@ -98,3 +98,6 @@ bin_dir: /usr/local/bin ## Please specify true if you want to perform a kernel upgrade kernel_upgrade: false + +## Etcd auto compaction 
retention for mvcc key value store in hour +#etcd_compaction_retention: 0 diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml index 8fd72f3db..06714cd90 100644 --- a/roles/etcd/defaults/main.yml +++ b/roles/etcd/defaults/main.yml @@ -22,3 +22,5 @@ etcd_memory_limit: 512M #etcd_cpu_limit: 300m etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) }}" + +etcd_compaction_retention: "0" diff --git a/roles/etcd/templates/etcd.env.yml b/roles/etcd/templates/etcd.env.yml index 07e200e03..115aa14b5 100644 --- a/roles/etcd/templates/etcd.env.yml +++ b/roles/etcd/templates/etcd.env.yml @@ -11,6 +11,7 @@ ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2380 ETCD_NAME={{ etcd_member_name }} ETCD_PROXY=off ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }} +ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }} # TLS settings ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem From 7332679678dd91669e31cd92fd4243a7a9830652 Mon Sep 17 00:00:00 2001 From: AtzeDeVries Date: Tue, 20 Jun 2017 14:50:08 +0200 Subject: [PATCH 04/36] Give more control over IPIP, but with same default behaviour --- inventory/group_vars/k8s-cluster.yml | 5 +++++ roles/network_plugin/calico/defaults/main.yml | 3 ++- roles/network_plugin/calico/tasks/main.yml | 4 ++-- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml index ef5e363dc..5403614a6 100644 --- a/inventory/group_vars/k8s-cluster.yml +++ b/inventory/group_vars/k8s-cluster.yml @@ -71,6 +71,11 @@ kube_users: # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing kube_network_plugin: calico +# In case of calico, override default behaviour to enable IPIP. Default behaviour is in case +# of a cloud_provider ipip:true and in case of no cloud provider ipip:false +# calico_ipip: true + + # Enable kubernetes network policies enable_network_policy: false diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml index 3ef70413f..88ab5b18e 100644 --- a/roles/network_plugin/calico/defaults/main.yml +++ b/roles/network_plugin/calico/defaults/main.yml @@ -2,8 +2,9 @@ # Enables Internet connectivity from containers nat_outgoing: true +# !! NOT USED in favour over calico_ipip # Use IP-over-IP encapsulation across hosts -ipip: false +# ipip: false # Set to true if you want your calico cni binaries to overwrite the # ones from hyperkube while leaving other cni plugins intact. 
diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml index fa734464e..4c2030491 100644 --- a/roles/network_plugin/calico/tasks/main.yml +++ b/roles/network_plugin/calico/tasks/main.yml @@ -94,7 +94,7 @@ shell: > echo '{ "kind": "ipPool", - "spec": {"disabled": false, "ipip": {"enabled": {{ cloud_provider is defined or ipip }}}, + "spec": {"disabled": false, "ipip": {"enabled": {{ calico_ipip is defined | ternary(calico_ipip, cloud_provider is defined) }}}, "nat-outgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }}}, "apiVersion": "v1", "metadata": {"cidr": "{{ kube_pods_subnet }}"} @@ -111,7 +111,7 @@ set_fact: ipip_arg: "--ipip" when: (legacy_calicoctl and - cloud_provider is defined or ipip) + calico_ipip is defined | ternary (calico_ipip, cloud_provider is defined)) tags: facts - name: Calico (old) | Define nat-outgoing pool argument From 61b74f9a5b7e12710efdc9523abe422fcb5e1d0f Mon Sep 17 00:00:00 2001 From: AtzeDeVries Date: Fri, 23 Jun 2017 09:16:05 +0200 Subject: [PATCH 05/36] updated to direct control over ipip --- inventory/group_vars/k8s-cluster.yml | 4 ---- roles/network_plugin/calico/defaults/main.yml | 3 +-- roles/network_plugin/calico/tasks/main.yml | 5 ++--- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml index 5403614a6..5e633d6fe 100644 --- a/inventory/group_vars/k8s-cluster.yml +++ b/inventory/group_vars/k8s-cluster.yml @@ -71,10 +71,6 @@ kube_users: # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing kube_network_plugin: calico -# In case of calico, override default behaviour to enable IPIP. Default behaviour is in case -# of a cloud_provider ipip:true and in case of no cloud provider ipip:false -# calico_ipip: true - # Enable kubernetes network policies enable_network_policy: false diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml index 88ab5b18e..598faf91b 100644 --- a/roles/network_plugin/calico/defaults/main.yml +++ b/roles/network_plugin/calico/defaults/main.yml @@ -2,9 +2,8 @@ # Enables Internet connectivity from containers nat_outgoing: true -# !! NOT USED in favour over calico_ipip # Use IP-over-IP encapsulation across hosts -# ipip: false +ipip: true # Set to true if you want your calico cni binaries to overwrite the # ones from hyperkube while leaving other cni plugins intact. 
diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml index 4c2030491..716086aed 100644 --- a/roles/network_plugin/calico/tasks/main.yml +++ b/roles/network_plugin/calico/tasks/main.yml @@ -94,7 +94,7 @@ shell: > echo '{ "kind": "ipPool", - "spec": {"disabled": false, "ipip": {"enabled": {{ calico_ipip is defined | ternary(calico_ipip, cloud_provider is defined) }}}, + "spec": {"disabled": false, "ipip": {"enabled": {{ ipip }}}, "nat-outgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }}}, "apiVersion": "v1", "metadata": {"cidr": "{{ kube_pods_subnet }}"} @@ -110,8 +110,7 @@ run_once: true set_fact: ipip_arg: "--ipip" - when: (legacy_calicoctl and - calico_ipip is defined | ternary (calico_ipip, cloud_provider is defined)) + when: (legacy_calicoctl and ipip ) tags: facts - name: Calico (old) | Define nat-outgoing pool argument From 5c1891ec9ffdb91eeb685e1009bc3237e8e4f8e0 Mon Sep 17 00:00:00 2001 From: gdmelloatpoints Date: Fri, 23 Jun 2017 13:49:31 -0400 Subject: [PATCH 06/36] In the etcd container, the etcd data directory is always /var/lib/etcd. Reverting to this value, since `etcd_data_dir` on the host maps to `/var/lib/etcd` in the container. --- roles/etcd/templates/etcd.env.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/etcd/templates/etcd.env.yml b/roles/etcd/templates/etcd.env.yml index 07e200e03..a4bb8d36a 100644 --- a/roles/etcd/templates/etcd.env.yml +++ b/roles/etcd/templates/etcd.env.yml @@ -1,4 +1,4 @@ -ETCD_DATA_DIR={{ etcd_data_dir }} +ETCD_DATA_DIR=/var/lib/etcd ETCD_ADVERTISE_CLIENT_URLS={{ etcd_client_url }} ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_peer_url }} ETCD_INITIAL_CLUSTER_STATE={% if etcd_cluster_is_healthy.rc != 0 | bool %}new{% else %}existing{% endif %} From 649654207fd7f42a6a575fe29a7c197b1ff8ee43 Mon Sep 17 00:00:00 2001 From: gdmelloatpoints Date: Tue, 27 Jun 2017 09:29:47 -0400 Subject: [PATCH 07/36] mount the etcd data directory in the container with the same path as on the host. 
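To make the mapping concrete: after this change the single `etcd_data_dir` value feeds the rkt mount, the docker `-v` flag, and `ETCD_DATA_DIR`, so the host path and the in-container path can no longer drift apart. A minimal sketch of the effect, assuming a non-default data directory (the path below is illustrative, not taken from the patch):

```yaml
# Hypothetical inventory override
etcd_data_dir: /data/etcd

# What the templates then render:
#   etcd.j2          ->  -v /data/etcd:/data/etcd:rw
#   etcd-rkt.service ->  --volume=etcd-data-dir,...,source=/data/etcd --mount=...,target=/data/etcd
#   etcd.env.yml     ->  ETCD_DATA_DIR=/data/etcd
```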
--- roles/etcd/templates/etcd-rkt.service.j2 | 4 ++-- roles/etcd/templates/etcd.env.yml | 2 +- roles/etcd/templates/etcd.j2 | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/etcd/templates/etcd-rkt.service.j2 b/roles/etcd/templates/etcd-rkt.service.j2 index 4c7fd5be9..580804810 100644 --- a/roles/etcd/templates/etcd-rkt.service.j2 +++ b/roles/etcd/templates/etcd-rkt.service.j2 @@ -15,8 +15,8 @@ ExecStart=/usr/bin/rkt run \ --mount=volume=etc-ssl-certs,target=/etc/ssl/certs \ --volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }},readOnly=true \ --mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \ ---volume=var-lib-etcd,kind=host,source={{ etcd_data_dir }},readOnly=false \ ---mount=volume=var-lib-etcd,target=/var/lib/etcd \ +--volume=etcd-data-dir,kind=host,source={{ etcd_data_dir }},readOnly=false \ +--mount=volume=etcd-data-dir,target={{ etcd_data_dir }} \ --set-env-file=/etc/etcd.env \ --stage1-from-dir=stage1-fly.aci \ {{ etcd_image_repo }}:{{ etcd_image_tag }} \ diff --git a/roles/etcd/templates/etcd.env.yml b/roles/etcd/templates/etcd.env.yml index a4bb8d36a..07e200e03 100644 --- a/roles/etcd/templates/etcd.env.yml +++ b/roles/etcd/templates/etcd.env.yml @@ -1,4 +1,4 @@ -ETCD_DATA_DIR=/var/lib/etcd +ETCD_DATA_DIR={{ etcd_data_dir }} ETCD_ADVERTISE_CLIENT_URLS={{ etcd_client_url }} ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_peer_url }} ETCD_INITIAL_CLUSTER_STATE={% if etcd_cluster_is_healthy.rc != 0 | bool %}new{% else %}existing{% endif %} diff --git a/roles/etcd/templates/etcd.j2 b/roles/etcd/templates/etcd.j2 index 479d85af8..b0780573c 100644 --- a/roles/etcd/templates/etcd.j2 +++ b/roles/etcd/templates/etcd.j2 @@ -5,7 +5,7 @@ --net=host \ -v /etc/ssl/certs:/etc/ssl/certs:ro \ -v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \ - -v {{ etcd_data_dir }}:/var/lib/etcd:rw \ + -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:rw \ {% if etcd_memory_limit is defined %} --memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \ {% endif %} From 007ee0da8e2d6c987541fc68f22936b4c4134c22 Mon Sep 17 00:00:00 2001 From: tanshanshan Date: Thu, 29 Jun 2017 14:45:15 +0800 Subject: [PATCH 08/36] fix reset --- roles/reset/tasks/main.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 96984f92b..af3e66601 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -83,6 +83,15 @@ - /etc/dhcp/dhclient.d/zdnsupdate.sh - /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate - "{{ bin_dir }}/kubelet" + - "{{ bin_dir }}/kubernetes-scripts" + - /run/flannel + - /etc/flannel + - /run/kubernetes + - /usr/local/share/ca-certificates/kube-ca.crt + - /usr/local/share/ca-certificates/etcd-ca.crt + - /etc/ssl/certs/kube-ca.pem + - /etc/ssl/certs/etcd-ca.pem + - /var/log/pods/ tags: ['files'] From c8258171ca4253b2dde753e83631c3d12176da43 Mon Sep 17 00:00:00 2001 From: Anton Nerozya Date: Thu, 29 Jun 2017 19:46:27 +0200 Subject: [PATCH 09/36] Better naming for recurrent tasks --- roles/vault/tasks/bootstrap/ca_trust.yml | 2 +- roles/vault/tasks/shared/issue_cert.yml | 6 +++--- roles/vault/tasks/shared/sync.yml | 4 ++-- roles/vault/tasks/shared/sync_file.yml | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/roles/vault/tasks/bootstrap/ca_trust.yml b/roles/vault/tasks/bootstrap/ca_trust.yml index 63ab256d5..ae67f7405 100644 --- a/roles/vault/tasks/bootstrap/ca_trust.yml +++ b/roles/vault/tasks/bootstrap/ca_trust.yml @@ -1,6 +1,6 @@ --- -- name: bootstrap/ca_trust | pull CA from cert from 
groups.vault|first +- name: "bootstrap/ca_trust | pull CA from cert from {{groups.vault|first}}" command: "cat {{ vault_cert_dir }}/ca.pem" register: vault_cert_file_cat delegate_to: "{{ groups['vault']|first }}" diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml index 0733e86a0..cb3685bf5 100644 --- a/roles/vault/tasks/shared/issue_cert.yml +++ b/roles/vault/tasks/shared/issue_cert.yml @@ -26,7 +26,7 @@ mode: "{{ issue_cert_dir_mode | d('0755') }}" owner: "{{ issue_cert_file_owner | d('root') }}" -- name: issue_cert | Generate the cert +- name: "issue_cert | Generate the cert for {{ issue_cert_role }}" uri: url: "{{ issue_cert_url }}/v1/{{ issue_cert_mount|d('pki') }}/issue/{{ issue_cert_role }}" headers: "{{ issue_cert_headers }}" @@ -40,7 +40,7 @@ register: issue_cert_result when: inventory_hostname == issue_cert_hosts|first -- name: issue_cert | Copy the cert to all hosts +- name: "issue_cert | Copy {{ issue_cert_path }} cert to all hosts" copy: content: "{{ hostvars[issue_cert_hosts|first]['issue_cert_result']['json']['data']['certificate'] }}" dest: "{{ issue_cert_path }}" @@ -48,7 +48,7 @@ mode: "{{ issue_cert_file_mode | d('0644') }}" owner: "{{ issue_cert_file_owner | d('root') }}" -- name: issue_cert | Copy the key to all hosts +- name: "issue_cert | Copy key for {{ issue_cert_path }} to all hosts" copy: content: "{{ hostvars[issue_cert_hosts|first]['issue_cert_result']['json']['data']['private_key'] }}" dest: "{{ issue_cert_path.rsplit('.', 1)|first }}-key.{{ issue_cert_path.rsplit('.', 1)|last }}" diff --git a/roles/vault/tasks/shared/sync.yml b/roles/vault/tasks/shared/sync.yml index bbfedbc4c..102532f0c 100644 --- a/roles/vault/tasks/shared/sync.yml +++ b/roles/vault/tasks/shared/sync.yml @@ -28,7 +28,7 @@ state: directory when: inventory_hostname not in sync_file_srcs -- name: "sync_file | Copy the file to hosts that don't have it" +- name: "sync_file | Copy {{ sync_file_path }} to hosts that don't have it" copy: content: "{{ sync_file_contents }}" dest: "{{ sync_file_path }}" @@ -37,7 +37,7 @@ owner: "{{ sync_file_owner|d('root') }}" when: inventory_hostname not in sync_file_srcs -- name: "sync_file | Copy the key file to hosts that don't have it" +- name: "sync_file | Copy {{ sync_file_key_path }} to hosts that don't have it" copy: content: "{{ sync_file_key_contents }}" dest: "{{ sync_file_key_path }}" diff --git a/roles/vault/tasks/shared/sync_file.yml b/roles/vault/tasks/shared/sync_file.yml index ef53e9d90..be5284154 100644 --- a/roles/vault/tasks/shared/sync_file.yml +++ b/roles/vault/tasks/shared/sync_file.yml @@ -19,12 +19,12 @@ when: >- sync_file_is_cert|d() and (sync_file_key_path is not defined or sync_file_key_path == '') -- name: "sync_file | Check if file exists" +- name: "sync_file | Check if {{sync_file_path}} file exists" stat: path: "{{ sync_file_path }}" register: sync_file_stat -- name: "sync_file | Check if key file exists" +- name: "sync_file | Check if {{ sync_file_key_path }} key file exists" stat: path: "{{ sync_file_key_path }}" register: sync_file_key_stat From 1fedbded626c9b9aa38f26812b9ca55df13cf084 Mon Sep 17 00:00:00 2001 From: Anton Nerozya Date: Thu, 29 Jun 2017 20:13:02 +0200 Subject: [PATCH 10/36] ignore_errors instead of failed_when: false --- roles/etcd/tasks/configure.yml | 2 +- roles/etcd/tasks/set_cluster_health.yml | 2 +- roles/vault/tasks/shared/check_vault.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/etcd/tasks/configure.yml 
b/roles/etcd/tasks/configure.yml index db67f706c..4546d7896 100644 --- a/roles/etcd/tasks/configure.yml +++ b/roles/etcd/tasks/configure.yml @@ -2,7 +2,7 @@ - name: Configure | Check if member is in cluster shell: "{{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}" register: etcd_member_in_cluster - failed_when: false + ignore_errors: true changed_when: false check_mode: no when: is_etcd_master diff --git a/roles/etcd/tasks/set_cluster_health.yml b/roles/etcd/tasks/set_cluster_health.yml index cdc87787a..8cf5e8867 100644 --- a/roles/etcd/tasks/set_cluster_health.yml +++ b/roles/etcd/tasks/set_cluster_health.yml @@ -2,7 +2,7 @@ - name: Configure | Check if cluster is healthy shell: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'" register: etcd_cluster_is_healthy - failed_when: false + ignore_errors: true changed_when: false check_mode: no when: is_etcd_master diff --git a/roles/vault/tasks/shared/check_vault.yml b/roles/vault/tasks/shared/check_vault.yml index 8a81ae5f2..2440c9508 100644 --- a/roles/vault/tasks/shared/check_vault.yml +++ b/roles/vault/tasks/shared/check_vault.yml @@ -14,7 +14,7 @@ headers: "{{ vault_client_headers }}" status_code: 200,429,500,501 validate_certs: no - failed_when: false + ignore_errors: true register: vault_local_service_health - name: check_vault | Set facts about local Vault health From c2b3920b50ccc9aff694fc31935a1b22ab0283fd Mon Sep 17 00:00:00 2001 From: Martin Joehren Date: Fri, 30 Jun 2017 12:17:03 +0000 Subject: [PATCH 11/36] added flag for not populating inventory entries to etc hosts file --- roles/kubernetes/preinstall/defaults/main.yml | 5 ++++- roles/kubernetes/preinstall/tasks/etchosts.yml | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml index dd5cbf810..686c0e9bc 100644 --- a/roles/kubernetes/preinstall/defaults/main.yml +++ b/roles/kubernetes/preinstall/defaults/main.yml @@ -32,7 +32,7 @@ openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" # For the vsphere integration, kubelet will need credentials to access # vsphere apis -# Documentation regarting these values can be found +# Documentation regarding these values can be found # https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105 vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}" vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}" @@ -49,3 +49,6 @@ vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') # Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content # for hostnet pods and infra needs resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf + +# All inventory hostnames will be written into each /etc/hosts file. 
+populate_inventory_to_hosts_file: true diff --git a/roles/kubernetes/preinstall/tasks/etchosts.yml b/roles/kubernetes/preinstall/tasks/etchosts.yml index df330be08..69496b7c2 100644 --- a/roles/kubernetes/preinstall/tasks/etchosts.yml +++ b/roles/kubernetes/preinstall/tasks/etchosts.yml @@ -9,6 +9,7 @@ create: yes backup: yes marker: "# Ansible inventory hosts {mark}" + when: populate_inventory_to_hosts_file - name: Hosts | populate kubernetes loadbalancer address into hosts file lineinfile: From 46d3f4369e14f2443dae03e40609c986e0a7fd70 Mon Sep 17 00:00:00 2001 From: Amit Kumar Jaiswal Date: Mon, 3 Jul 2017 04:06:42 +0530 Subject: [PATCH 12/36] Updated K8s version Signed-off-by: Amit Kumar Jaiswal --- roles/kubernetes/node/templates/kubelet.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2 index d2959b8a6..829d1f0be 100644 --- a/roles/kubernetes/node/templates/kubelet.j2 +++ b/roles/kubernetes/node/templates/kubelet.j2 @@ -13,7 +13,7 @@ KUBELET_HOSTNAME="--hostname-override={{ ansible_hostname }}" --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \ --kube-reserved cpu={{ kubelet_cpu_limit }},memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} \ --node-status-update-frequency={{ kubelet_status_update_frequency }} \ -{% if kube_version | version_compare('v1.6', '>=') %} +{% if kube_version | version_compare('v1.7', '>=') %} --enable-cri={{ kubelet_enable_cri }} --cgroups-per-qos={{ kubelet_cgroups_per_qos }} \ --enforce-node-allocatable='{{ kubelet_enforce_node_allocatable }}' {% endif %}{% endset %} From c36fb5919a2f3018c468dde91925aa389f36e609 Mon Sep 17 00:00:00 2001 From: Amit Kumar Jaiswal Date: Mon, 3 Jul 2017 15:55:04 +0530 Subject: [PATCH 13/36] Update kubelet.j2 Updated!! --- roles/kubernetes/node/templates/kubelet.j2 | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2 index 829d1f0be..9bead7a39 100644 --- a/roles/kubernetes/node/templates/kubelet.j2 +++ b/roles/kubernetes/node/templates/kubelet.j2 @@ -13,7 +13,10 @@ KUBELET_HOSTNAME="--hostname-override={{ ansible_hostname }}" --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \ --kube-reserved cpu={{ kubelet_cpu_limit }},memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} \ --node-status-update-frequency={{ kubelet_status_update_frequency }} \ -{% if kube_version | version_compare('v1.7', '>=') %} +{% if kube_version | version_compare('v1.6', '>=') %} +{# flag got removed with 1.7.0 #} +{% if kube_version | version_compare('v1.7', '<') %} +--enable-cri={{ kubelet_enable_cri }} {% endif %} --enable-cri={{ kubelet_enable_cri }} --cgroups-per-qos={{ kubelet_cgroups_per_qos }} \ --enforce-node-allocatable='{{ kubelet_enforce_node_allocatable }}' {% endif %}{% endset %} From 3d2680a1025091c2be0fbff2f01d69f0dbeccd82 Mon Sep 17 00:00:00 2001 From: Amit Kumar Jaiswal Date: Mon, 3 Jul 2017 15:58:50 +0530 Subject: [PATCH 14/36] Update kubelet.j2 Updated! 
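This removes the duplicated `--enable-cri` line introduced one commit earlier; together with the next commit, the template ends up emitting the flag only on versions that still support it. A summary of where patches 13-15 converge, as a sketch (the key below is illustrative, not a real kubespray variable):

```yaml
# Kubelet flags emitted per kube_version once patches 13-15 are applied
kubelet_flags_by_version:
  "below v1.6":    []
  "v1.6.x":        ["--enable-cri", "--cgroups-per-qos", "--enforce-node-allocatable"]
  "v1.7 and up":   ["--cgroups-per-qos", "--enforce-node-allocatable"]  # --enable-cri was removed in 1.7.0
```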
--- roles/kubernetes/node/templates/kubelet.j2 | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2 index 9bead7a39..a8174c769 100644 --- a/roles/kubernetes/node/templates/kubelet.j2 +++ b/roles/kubernetes/node/templates/kubelet.j2 @@ -16,7 +16,6 @@ KUBELET_HOSTNAME="--hostname-override={{ ansible_hostname }}" {% if kube_version | version_compare('v1.6', '>=') %} {# flag got removed with 1.7.0 #} {% if kube_version | version_compare('v1.7', '<') %} ---enable-cri={{ kubelet_enable_cri }} {% endif %} --enable-cri={{ kubelet_enable_cri }} --cgroups-per-qos={{ kubelet_cgroups_per_qos }} \ --enforce-node-allocatable='{{ kubelet_enforce_node_allocatable }}' {% endif %}{% endset %} From 319a0d65af2e0fac4e64257f30a7e63fd4da4760 Mon Sep 17 00:00:00 2001 From: Amit Kumar Jaiswal Date: Mon, 3 Jul 2017 16:23:35 +0530 Subject: [PATCH 15/36] Update kubelet.j2 Updated with closing endif. --- roles/kubernetes/node/templates/kubelet.j2 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2 index a8174c769..8c0ea94b5 100644 --- a/roles/kubernetes/node/templates/kubelet.j2 +++ b/roles/kubernetes/node/templates/kubelet.j2 @@ -16,7 +16,9 @@ KUBELET_HOSTNAME="--hostname-override={{ ansible_hostname }}" {% if kube_version | version_compare('v1.6', '>=') %} {# flag got removed with 1.7.0 #} {% if kube_version | version_compare('v1.7', '<') %} ---enable-cri={{ kubelet_enable_cri }} --cgroups-per-qos={{ kubelet_cgroups_per_qos }} \ +--enable-cri={{ kubelet_enable_cri }} \ +{% endif %} +--cgroups-per-qos={{ kubelet_cgroups_per_qos }} \ --enforce-node-allocatable='{{ kubelet_enforce_node_allocatable }}' {% endif %}{% endset %} {# DNS settings for kubelet #} From 5f75d4c09905ab437cbd61935eca3c247064851c Mon Sep 17 00:00:00 2001 From: Abdelsalam Abbas Date: Tue, 27 Jun 2017 22:11:44 +0200 Subject: [PATCH 16/36] Uncodron Masters which have scheduling Enabled --- roles/upgrade/post-upgrade/tasks/main.yml | 3 ++- upgrade-cluster.yml | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml index c32f42491..e7efa0601 100644 --- a/roles/upgrade/post-upgrade/tasks/main.yml +++ b/roles/upgrade/post-upgrade/tasks/main.yml @@ -3,4 +3,5 @@ - name: Uncordon node command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}" delegate_to: "{{ groups['kube-master'][0] }}" - when: needs_cordoning|default(false) + when: (needs_cordoning|default(false)) and ( {%- if inventory_hostname in groups['kube-node'] -%} true {%- else -%} false {%- endif -%} ) + diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml index 09f268ecf..1a66904ce 100644 --- a/upgrade-cluster.yml +++ b/upgrade-cluster.yml @@ -67,6 +67,7 @@ - { role: kubernetes/node, tags: node } - { role: kubernetes/master, tags: master } - { role: network_plugin, tags: network } + - { role: upgrade/post-upgrade, tags: post-upgrade } #Finally handle worker upgrades, based on given batch size - hosts: kube-node:!kube-master From 38f5d1b18e6ddb7e1b670cb432310f8d9c061c13 Mon Sep 17 00:00:00 2001 From: Hans Kristian Flaatten Date: Wed, 14 Jun 2017 12:37:35 +0200 Subject: [PATCH 17/36] Set kubedns minimum replicas to 2 --- roles/kubernetes-apps/ansible/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml 
b/roles/kubernetes-apps/ansible/defaults/main.yml index 3d2e7a419..2787472c8 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -6,7 +6,7 @@ kubednsautoscaler_version: 1.1.1 dns_memory_limit: 170Mi dns_cpu_requests: 100m dns_memory_requests: 70Mi -kubedns_min_replicas: 1 +kubedns_min_replicas: 2 kubedns_nodes_per_replica: 10 # Images From 5df757a4039fb4ba8ebb222a022b73c0064cb4b1 Mon Sep 17 00:00:00 2001 From: Hans Kristian Flaatten Date: Wed, 21 Jun 2017 10:37:13 +0200 Subject: [PATCH 18/36] Correct indentation and line endings for gitlab config --- .gitlab-ci.yml | 126 ++++++++++++++++++++++++------------------------- 1 file changed, 63 insertions(+), 63 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b64dd2a4e..9b890870b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -92,7 +92,7 @@ before_script: - echo ${PWD} - echo "${STARTUP_SCRIPT}" - > - ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local + ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local ${LOG_LEVEL} -e cloud_image=${CLOUD_IMAGE} -e cloud_region=${CLOUD_REGION} @@ -118,7 +118,7 @@ before_script: ${SSH_ARGS} ${LOG_LEVEL} -e ansible_python_interpreter=${PYPATH} - -e ansible_ssh_user=${SSH_USER} + -e ansible_ssh_user=${SSH_USER} -e bootstrap_os=${BOOTSTRAP_OS} -e cert_management=${CERT_MGMT:-script} -e cloud_provider=gce @@ -136,30 +136,30 @@ before_script: # Repeat deployment if testing upgrade - > - if [ "${UPGRADE_TEST}" != "false" ]; then + if [ "${UPGRADE_TEST}" != "false" ]; then test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml"; test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml"; - pip install ansible==2.3.0; - git checkout "${CI_BUILD_REF}"; - ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER - ${SSH_ARGS} - ${LOG_LEVEL} - -e ansible_python_interpreter=${PYPATH} - -e ansible_ssh_user=${SSH_USER} - -e bootstrap_os=${BOOTSTRAP_OS} - -e cloud_provider=gce - -e deploy_netchecker=true - -e download_localhost=${DOWNLOAD_LOCALHOST} - -e download_run_once=${DOWNLOAD_RUN_ONCE} - -e etcd_deployment_type=${ETCD_DEPLOYMENT} - -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} - -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} - -e local_release_dir=${PWD}/downloads - -e resolvconf_mode=${RESOLVCONF_MODE} - -e weave_cpu_requests=${WEAVE_CPU_LIMIT} - -e weave_cpu_limit=${WEAVE_CPU_LIMIT} - --limit "all:!fake_hosts" - $PLAYBOOK; + pip install ansible==2.3.0; + git checkout "${CI_BUILD_REF}"; + ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER + ${SSH_ARGS} + ${LOG_LEVEL} + -e ansible_python_interpreter=${PYPATH} + -e ansible_ssh_user=${SSH_USER} + -e bootstrap_os=${BOOTSTRAP_OS} + -e cloud_provider=gce + -e deploy_netchecker=true + -e download_localhost=${DOWNLOAD_LOCALHOST} + -e download_run_once=${DOWNLOAD_RUN_ONCE} + -e etcd_deployment_type=${ETCD_DEPLOYMENT} + -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} + -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} + -e local_release_dir=${PWD}/downloads + -e resolvconf_mode=${RESOLVCONF_MODE} + -e weave_cpu_requests=${WEAVE_CPU_LIMIT} + -e weave_cpu_limit=${WEAVE_CPU_LIMIT} + --limit "all:!fake_hosts" + $PLAYBOOK; fi # Tests Cases @@ -175,40 +175,40 @@ before_script: ## Idempotency checks 1/5 (repeat deployment) - > if [ "${IDEMPOT_CHECK}" = "true" ]; then - 
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS - -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} - --private-key=${HOME}/.ssh/id_rsa - -e bootstrap_os=${BOOTSTRAP_OS} - -e ansible_python_interpreter=${PYPATH} - -e download_localhost=${DOWNLOAD_LOCALHOST} - -e download_run_once=${DOWNLOAD_RUN_ONCE} - -e deploy_netchecker=true - -e resolvconf_mode=${RESOLVCONF_MODE} - -e local_release_dir=${PWD}/downloads - -e etcd_deployment_type=${ETCD_DEPLOYMENT} - -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} - --limit "all:!fake_hosts" + ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS + -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} + --private-key=${HOME}/.ssh/id_rsa + -e bootstrap_os=${BOOTSTRAP_OS} + -e ansible_python_interpreter=${PYPATH} + -e download_localhost=${DOWNLOAD_LOCALHOST} + -e download_run_once=${DOWNLOAD_RUN_ONCE} + -e deploy_netchecker=true + -e resolvconf_mode=${RESOLVCONF_MODE} + -e local_release_dir=${PWD}/downloads + -e etcd_deployment_type=${ETCD_DEPLOYMENT} + -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} + --limit "all:!fake_hosts" cluster.yml; fi ## Idempotency checks 2/5 (Advanced DNS checks) - > if [ "${IDEMPOT_CHECK}" = "true" ]; then - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} - -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root - --limit "all:!fake_hosts" + ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} + -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root + --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL; fi ## Idempotency checks 3/5 (reset deployment) - > if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then - ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS - -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} - --private-key=${HOME}/.ssh/id_rsa - -e bootstrap_os=${BOOTSTRAP_OS} - -e ansible_python_interpreter=${PYPATH} - -e reset_confirmation=yes + ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS + -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} + --private-key=${HOME}/.ssh/id_rsa + -e bootstrap_os=${BOOTSTRAP_OS} + -e ansible_python_interpreter=${PYPATH} + -e reset_confirmation=yes --limit "all:!fake_hosts" reset.yml; fi @@ -216,28 +216,28 @@ before_script: ## Idempotency checks 4/5 (redeploy after reset) - > if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then - ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS - -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} - --private-key=${HOME}/.ssh/id_rsa - -e bootstrap_os=${BOOTSTRAP_OS} - -e ansible_python_interpreter=${PYPATH} - -e download_localhost=${DOWNLOAD_LOCALHOST} - -e download_run_once=${DOWNLOAD_RUN_ONCE} - -e deploy_netchecker=true - -e resolvconf_mode=${RESOLVCONF_MODE} - -e local_release_dir=${PWD}/downloads - -e etcd_deployment_type=${ETCD_DEPLOYMENT} - -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} - --limit "all:!fake_hosts" + ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER 
$SSH_ARGS + -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} + --private-key=${HOME}/.ssh/id_rsa + -e bootstrap_os=${BOOTSTRAP_OS} + -e ansible_python_interpreter=${PYPATH} + -e download_localhost=${DOWNLOAD_LOCALHOST} + -e download_run_once=${DOWNLOAD_RUN_ONCE} + -e deploy_netchecker=true + -e resolvconf_mode=${RESOLVCONF_MODE} + -e local_release_dir=${PWD}/downloads + -e etcd_deployment_type=${ETCD_DEPLOYMENT} + -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} + --limit "all:!fake_hosts" cluster.yml; fi ## Idempotency checks 5/5 (Advanced DNS checks) - > if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} - -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root - --limit "all:!fake_hosts" + ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} + -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root + --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL; fi @@ -603,7 +603,7 @@ ci-authorized: script: - /bin/sh scripts/premoderator.sh except: ['triggers', 'master'] - + syntax-check: <<: *job stage: unit-tests From 6bd27038ccd5a98c8812355b5d89d42b2c013977 Mon Sep 17 00:00:00 2001 From: Hans Kristian Flaatten Date: Wed, 21 Jun 2017 10:38:25 +0200 Subject: [PATCH 19/36] Set kubedns min replicas to 1 in gitlab config --- .gitlab-ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9b890870b..259c45614 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -127,6 +127,7 @@ before_script: -e download_run_once=${DOWNLOAD_RUN_ONCE} -e etcd_deployment_type=${ETCD_DEPLOYMENT} -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} + -e kubedns_min_replicas=1 -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} -e local_release_dir=${PWD}/downloads -e resolvconf_mode=${RESOLVCONF_MODE} @@ -153,6 +154,7 @@ before_script: -e download_run_once=${DOWNLOAD_RUN_ONCE} -e etcd_deployment_type=${ETCD_DEPLOYMENT} -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} + -e kubedns_min_replicas=1 -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} -e local_release_dir=${PWD}/downloads -e resolvconf_mode=${RESOLVCONF_MODE} @@ -186,6 +188,7 @@ before_script: -e resolvconf_mode=${RESOLVCONF_MODE} -e local_release_dir=${PWD}/downloads -e etcd_deployment_type=${ETCD_DEPLOYMENT} + -e kubedns_min_replicas=1 -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} --limit "all:!fake_hosts" cluster.yml; @@ -227,6 +230,7 @@ before_script: -e resolvconf_mode=${RESOLVCONF_MODE} -e local_release_dir=${PWD}/downloads -e etcd_deployment_type=${ETCD_DEPLOYMENT} + -e kubedns_min_replicas=1 -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} --limit "all:!fake_hosts" cluster.yml; From a742d10c54116538f50b35a2379ef8efcad3c3fb Mon Sep 17 00:00:00 2001 From: Kevin Jing Qiu Date: Tue, 4 Jul 2017 19:05:16 -0400 Subject: [PATCH 20/36] Allow calico ipPool to be created with mode "cross-subnet" --- roles/network_plugin/calico/defaults/main.yml | 1 + roles/network_plugin/calico/tasks/main.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml index 3ef70413f..8cd120234 100644 --- a/roles/network_plugin/calico/defaults/main.yml +++ b/roles/network_plugin/calico/defaults/main.yml @@ -4,6 +4,7 @@ nat_outgoing: true # Use IP-over-IP encapsulation across hosts ipip: false 
+ipip_mode: always # change to "cross-subnet" if you only want ipip encapsulation on traffic going across subnets # Set to true if you want your calico cni binaries to overwrite the # ones from hyperkube while leaving other cni plugins intact. diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml index fa734464e..cdd17ffa6 100644 --- a/roles/network_plugin/calico/tasks/main.yml +++ b/roles/network_plugin/calico/tasks/main.yml @@ -94,7 +94,7 @@ shell: > echo '{ "kind": "ipPool", - "spec": {"disabled": false, "ipip": {"enabled": {{ cloud_provider is defined or ipip }}}, + "spec": {"disabled": false, "ipip": {"enabled": {{ cloud_provider is defined or ipip }}, "mode": "{{ ipip_mode }}"}, "nat-outgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }}}, "apiVersion": "v1", "metadata": {"cidr": "{{ kube_pods_subnet }}"} From e26be9cb8a8119ea7cb4cf9209e782e667122323 Mon Sep 17 00:00:00 2001 From: Vladimir Kozyrev Date: Wed, 31 May 2017 12:11:47 +0300 Subject: [PATCH 21/36] add private dns server for a specific zone --- inventory/group_vars/k8s-cluster.yml | 5 +++++ roles/dnsmasq/defaults/main.yml | 3 +++ roles/dnsmasq/templates/01-kube-dns.conf.j2 | 5 +++++ 3 files changed, 13 insertions(+) diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml index ef5e363dc..350be8240 100644 --- a/inventory/group_vars/k8s-cluster.yml +++ b/inventory/group_vars/k8s-cluster.yml @@ -132,3 +132,8 @@ efk_enabled: false # Helm deployment helm_enabled: false + +# dnsmasq +# dnsmasq_upstream_dns_servers: +# - /resolvethiszone.with/10.0.4.250 +# - 8.8.8.8 diff --git a/roles/dnsmasq/defaults/main.yml b/roles/dnsmasq/defaults/main.yml index 58b1b7f1d..bf670c788 100644 --- a/roles/dnsmasq/defaults/main.yml +++ b/roles/dnsmasq/defaults/main.yml @@ -30,3 +30,6 @@ dns_memory_requests: 50Mi # Autoscaler parameters dnsmasq_nodes_per_replica: 10 dnsmasq_min_replicas: 1 + +# Custom name servers +dnsmasq_upstream_dns_servers: [] diff --git a/roles/dnsmasq/templates/01-kube-dns.conf.j2 b/roles/dnsmasq/templates/01-kube-dns.conf.j2 index dce26d726..483be2090 100644 --- a/roles/dnsmasq/templates/01-kube-dns.conf.j2 +++ b/roles/dnsmasq/templates/01-kube-dns.conf.j2 @@ -11,6 +11,11 @@ server=/{{ dns_domain }}/{{ skydns_server }} local=/{{ bogus_domains }} #Set upstream dns servers +{% if dnsmasq_upstream_dns_servers|length > 0 %} +{% for srv in dnsmasq_upstream_dns_servers %} +server={{ srv }} +{% endfor %} +{% endif %} {% if system_and_upstream_dns_servers|length > 0 %} {% for srv in system_and_upstream_dns_servers %} server={{ srv }} From e7f794531eebb8481e21cc66d3899a52e0f727df Mon Sep 17 00:00:00 2001 From: Abdelsalam Abbas Date: Fri, 7 Jul 2017 09:20:14 +0200 Subject: [PATCH 22/36] make sure every instance is a node if user changed defauls of num_instances --- Vagrantfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index ab8073280..09419aa37 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -30,8 +30,6 @@ $os = "ubuntu" $etcd_instances = $num_instances # The first two nodes are masters $kube_master_instances = $num_instances == 1 ? 
$num_instances : ($num_instances - 1) -# All nodes are kube nodes -$kube_node_instances = $num_instances $local_release_dir = "/vagrant/temp" host_vars = {} @@ -40,6 +38,9 @@ if File.exist?(CONFIG) require CONFIG end +# All nodes are kube nodes +$kube_node_instances = $num_instances + $box = SUPPORTED_OS[$os][:box] # if $inventory is not set, try to use example $inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory From d1a02bd3e9f22d95213bd1d91c466124ea63ccc9 Mon Sep 17 00:00:00 2001 From: Spencer Smith Date: Fri, 7 Jul 2017 13:13:12 -0400 Subject: [PATCH 23/36] match kubespray-defaults dns mode with k8s-cluster setting --- roles/kubespray-defaults/defaults/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index f0323d479..c18afd39b 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -22,7 +22,7 @@ cluster_name: cluster.local # Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods ndots: 2 # Can be dnsmasq_kubedns, kubedns or none -dns_mode: dnsmasq_kubedns +dns_mode: kubedns # Can be docker_dns, host_resolvconf or none resolvconf_mode: docker_dns # Deploy netchecker app to verify DNS resolve as an HTTP service From 22d600e8c0e4bc60f182bde25dac0831c23eb7cd Mon Sep 17 00:00:00 2001 From: Abdelsalam Abbas Date: Sun, 9 Jul 2017 09:56:32 +0200 Subject: [PATCH 24/36] fix azure kubernetes port to 6443 --- .../roles/generate-templates/templates/masters.json | 8 ++++---- .../roles/generate-templates/templates/network.json | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/contrib/azurerm/roles/generate-templates/templates/masters.json b/contrib/azurerm/roles/generate-templates/templates/masters.json index c85addac8..842d5fb33 100644 --- a/contrib/azurerm/roles/generate-templates/templates/masters.json +++ b/contrib/azurerm/roles/generate-templates/templates/masters.json @@ -62,8 +62,8 @@ "id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]" }, "protocol": "tcp", - "frontendPort": 443, - "backendPort": 443, + "frontendPort": 6443, + "backendPort": 6443, "enableFloatingIP": false, "idleTimeoutInMinutes": 5, "probe": { @@ -77,7 +77,7 @@ "name": "kube-api", "properties": { "protocol": "tcp", - "port": 443, + "port": 6443, "intervalInSeconds": 5, "numberOfProbes": 2 } @@ -193,4 +193,4 @@ } {% if not loop.last %},{% endif %} {% endfor %} ] -} \ No newline at end of file +} diff --git a/contrib/azurerm/roles/generate-templates/templates/network.json b/contrib/azurerm/roles/generate-templates/templates/network.json index 728adf138..32a55d6d8 100644 --- a/contrib/azurerm/roles/generate-templates/templates/network.json +++ b/contrib/azurerm/roles/generate-templates/templates/network.json @@ -92,7 +92,7 @@ "description": "Allow secure kube-api", "protocol": "Tcp", "sourcePortRange": "*", - "destinationPortRange": "443", + "destinationPortRange": "6443", "sourceAddressPrefix": "Internet", "destinationAddressPrefix": "*", "access": "Allow", @@ -106,4 +106,4 @@ "dependsOn": [] } ] -} \ No newline at end of file +} From 442be2ac020329634943fb7dc1d89bd0e73d6dcd Mon Sep 17 00:00:00 2001 From: Alexander Chumakov Date: Mon, 10 Jul 2017 18:53:57 +0300 Subject: [PATCH 25/36] [terraform/openstack] README.md Guide expanded Add section how to configure k8s cluster and set up kubectl --- contrib/terraform/openstack/README.md | 79 +++++++++++++++++++++++++++ 1 file 
changed, 79 insertions(+) diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md index e98b8068a..70666058f 100644 --- a/contrib/terraform/openstack/README.md +++ b/contrib/terraform/openstack/README.md @@ -36,6 +36,8 @@ Ensure your OpenStack **Identity v2** credentials are loaded in environment vari $ source ~/.stackrc ``` +> You must set **OS_REGION_NAME** and **OS_TENANT_ID** environment variables not required by openstack CLI + You will need two networks before installing, an internal network and an external (floating IP Pool) network. The internet network can be shared as we use security groups to provide network segregation. Due to the many @@ -99,6 +101,35 @@ ssh_user_gfs = "ubuntu" If these variables are provided, this will give rise to a new ansible group called `gfs-cluster`, for which we have added ansible roles to execute in the ansible provisioning step. If you are using Container Linux by CoreOS, these GlusterFS VM necessarily need to be either Debian or RedHat based VMs, Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through binaries available on hyperkube v1.4.3_coreos.0 or higher. +# Configure Cluster variables + +Edit `inventory/group_vars/all.yml`: +- Set variable **bootstrap_os** according selected image +``` +# Valid bootstrap options (required): ubuntu, coreos, centos, none +bootstrap_os: coreos +``` +- And **bin_dir** +``` +# Directory where the binaries will be installed +# Default: +# bin_dir: /usr/local/bin +# For Container Linux by CoreOS: +bin_dir: /opt/bin +``` +Edit `inventory/group_vars/k8s-cluster.yml`: +- Set variable **kube_network_plugin** according selected networking +``` +# Choose network plugin (calico, weave or flannel) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: flannel +``` +> flannel works out-of-the-box + +> calico requires allowing service's and pod's subnets on according OpenStack Neutron ports + +Configure OpenStack Neutron ports: +[OpenStack](/docs/openstack.md) # Provision a Kubernetes Cluster on OpenStack @@ -156,6 +187,54 @@ Deploy kubernetes: $ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml ``` +# Set up local kubectl +1. Install kubectl on your workstation: +[Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) +2. Add route to internal IP of master node (if needed): +``` +sudo route add [master-internal-ip] gw [router-ip] +``` +or +``` +sudo route add -net [internal-subnet]/24 gw [router-ip] +``` +3. List Kubernetes certs&keys: +``` +ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/ +``` +4. Get admin's certs&key: +``` +ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1-key.pem > admin-key.pem +ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1.pem > admin.pem +ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem +``` +5. Edit OpenStack Neutron master's Security Group to allow TCP connections to port 6443 +6. 
Configure kubectl: +``` +kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \ + --certificate-authority=ca.pem + +kubectl config set-credentials default-admin \ + --certificate-authority=ca.pem \ + --client-key=admin-key.pem \ + --client-certificate=admin.pem + +kubectl config set-credentials default-admin \ + --certificate-authority=ca.pem \ + --client-key=admin-key.pem \ + --client-certificate=admin.pem + +kubectl config set-context default-system --cluster=default-cluster --user=default-admin +kubectl config use-context default-system +``` +7. Check it: +``` +kubectl version +``` + +# What's next +[Start Hello Kubernetes Service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/) + # clean up: ``` From ecaa7dad49b325458b14e33b8ab884bf700b322b Mon Sep 17 00:00:00 2001 From: Abdelsalam Abbas Date: Mon, 10 Jul 2017 20:16:02 +0200 Subject: [PATCH 26/36] add a variable for kube_apiserver at all --- contrib/azurerm/group_vars/all | 2 ++ .../azurerm/roles/generate-templates/templates/masters.json | 6 +++--- .../azurerm/roles/generate-templates/templates/network.json | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/contrib/azurerm/group_vars/all b/contrib/azurerm/group_vars/all index d7c49742a..9cecfd5ed 100644 --- a/contrib/azurerm/group_vars/all +++ b/contrib/azurerm/group_vars/all @@ -19,6 +19,8 @@ admin_username: devops admin_password: changeme ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy" +kube_apiserver_port: 6443 + # Azure CIDRs azure_vnet_cidr: 10.0.0.0/8 azure_admin_cidr: 10.241.2.0/24 diff --git a/contrib/azurerm/roles/generate-templates/templates/masters.json b/contrib/azurerm/roles/generate-templates/templates/masters.json index 842d5fb33..ecfc72140 100644 --- a/contrib/azurerm/roles/generate-templates/templates/masters.json +++ b/contrib/azurerm/roles/generate-templates/templates/masters.json @@ -62,8 +62,8 @@ "id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]" }, "protocol": "tcp", - "frontendPort": 6443, - "backendPort": 6443, + "frontendPort": "{{kube_apiserver_port}}", + "backendPort": "{{kube_apiserver_port}}", "enableFloatingIP": false, "idleTimeoutInMinutes": 5, "probe": { @@ -77,7 +77,7 @@ "name": "kube-api", "properties": { "protocol": "tcp", - "port": 6443, + "port": "{{kube_apiserver_port}}", "intervalInSeconds": 5, "numberOfProbes": 2 } diff --git a/contrib/azurerm/roles/generate-templates/templates/network.json b/contrib/azurerm/roles/generate-templates/templates/network.json index 32a55d6d8..763b3dbb3 100644 --- a/contrib/azurerm/roles/generate-templates/templates/network.json +++ b/contrib/azurerm/roles/generate-templates/templates/network.json @@ -92,7 +92,7 @@ "description": "Allow secure kube-api", "protocol": "Tcp", "sourcePortRange": "*", - "destinationPortRange": "6443", + "destinationPortRange": "{{kube_apiserver_port}}", "sourceAddressPrefix": "Internet", "destinationAddressPrefix": "*", "access": "Allow", From 9f45eba6f62b4d39f4f65ae1d1ea3a105ab6e75a Mon Sep 17 00:00:00 2001 From: Delfer Date: Tue, 11 Jul 2017 09:11:55 +0000 Subject: [PATCH 27/36] Kubernetes upgrade 
to 1.6.7
---
 README.md                                   | 2 +-
 inventory/group_vars/k8s-cluster.yml        | 2 +-
 roles/download/defaults/main.yml            | 2 +-
 roles/kubespray-defaults/defaults/main.yaml | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 60252cfba..ef46bb272 100644
--- a/README.md
+++ b/README.md
@@ -52,7 +52,7 @@ Versions of supported components
 --------------------------------
-[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.6.4
+[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.6.7
 [etcd](https://github.com/coreos/etcd/releases) v3.0.17
 [flanneld](https://github.com/coreos/flannel/releases) v0.6.2
 [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0
diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml index 350be8240..cc36160fd 100644 --- a/inventory/group_vars/k8s-cluster.yml +++ b/inventory/group_vars/k8s-cluster.yml @@ -23,7 +23,7 @@ kube_users_dir: "{{ kube_config_dir }}/users" kube_api_anonymous_auth: false ## Change this to use another Kubernetes version, e.g. a current beta release -kube_version: v1.6.4 +kube_version: v1.6.7 # Where the binaries will be downloaded. # Note: ensure that you've enough disk space (about 1G) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 334406a14..52cc491e1 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -18,7 +18,7 @@ download_localhost: False download_always_pull: False # Versions -kube_version: v1.6.4 +kube_version: v1.6.7 etcd_version: v3.0.17 #TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults # after migration to container download diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index c18afd39b..6ecdaa9c9 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -4,7 +4,7 @@ bootstrap_os: none kube_api_anonymous_auth: false ## Change this to use another Kubernetes version, e.g. a current beta release -kube_version: v1.6.4 +kube_version: v1.6.7 # Directory where the binaries will be installed bin_dir: /usr/local/bin From f3165a716af0f3c60a768d3f4258fbdc9ca42d04 Mon Sep 17 00:00:00 2001 From: Alexander Chumakov Date: Tue, 11 Jul 2017 12:46:19 +0300 Subject: [PATCH 28/36] Add more config to README.md Add resolvconf_mode and cloud_provider config description to README.md --- contrib/terraform/openstack/README.md | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md index 70666058f..dcc3ddec1 100644 --- a/contrib/terraform/openstack/README.md +++ b/contrib/terraform/openstack/README.md @@ -109,7 +109,7 @@ Edit `inventory/group_vars/all.yml`: # Valid bootstrap options (required): ubuntu, coreos, centos, none bootstrap_os: coreos ``` -- And **bin_dir** +- **bin_dir** ``` # Directory where the binaries will be installed # Default: @@ -117,6 +117,10 @@ bootstrap_os: coreos # For Container Linux by CoreOS: bin_dir: /opt/bin ``` +- and **cloud_provider** +``` +cloud_provider: openstack +``` Edit `inventory/group_vars/k8s-cluster.yml`: - Set variable **kube_network_plugin** according selected networking ``` @@ -127,9 +131,16 @@ kube_network_plugin: flannel > flannel works out-of-the-box > calico requires allowing service's and pod's subnets on according OpenStack Neutron ports +- Set variable **resolvconf_mode** +``` +# Can be docker_dns, host_resolvconf or none +# Default: +# resolvconf_mode: docker_dns +# For Container Linux by CoreOS: +resolvconf_mode: host_resolvconf +``` -Configure OpenStack Neutron ports: -[OpenStack](/docs/openstack.md) +For calico configure OpenStack Neutron ports: [OpenStack](/docs/openstack.md) # Provision a Kubernetes Cluster on OpenStack From 5c7e309d13ec1c096ada65fd494a28a2ae8a2106 Mon Sep 17 00:00:00 2001 From: Raj Perera Date: Tue, 11 Jul 2017 10:53:19 -0400 Subject: [PATCH 29/36] Add more instructions to setting up AWS provider --- docs/aws.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/aws.md b/docs/aws.md index 8bdbc06fa..e1e81331e 100644 --- a/docs/aws.md +++ b/docs/aws.md @@ -5,6 +5,10 @@ To deploy 
kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provi

 Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.

+You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targeted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
+
+Make sure your VPC has both DNS Hostnames support and Private DNS enabled.
+
 The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.

 You can now create your cluster!

From f4a3b3141508df665bfa6305b349a993e80ae680 Mon Sep 17 00:00:00 2001
From: nico
Date: Wed, 12 Jul 2017 11:01:06 +0200
Subject: [PATCH 30/36] add vsphere cloud provider doc

fix typo
---
 README.md       |  1 +
 docs/vsphere.md | 61 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+)
 create mode 100644 docs/vsphere.md

diff --git a/README.md b/README.md
index 60252cfba..df1d33bf9 100644
--- a/README.md
+++ b/README.md
@@ -34,6 +34,7 @@ To deploy the cluster you can use :
 * [OpenStack](docs/openstack.md)
 * [AWS](docs/aws.md)
 * [Azure](docs/azure.md)
+* [vSphere](docs/vsphere.md)
 * [Large deployments](docs/large-deployments.md)
 * [Upgrades basics](docs/upgrades.md)
 * [Roadmap](docs/roadmap.md)
diff --git a/docs/vsphere.md b/docs/vsphere.md
new file mode 100644
index 000000000..5374304c1
--- /dev/null
+++ b/docs/vsphere.md
@@ -0,0 +1,61 @@
+# vSphere cloud provider
+
+Kubespray can be deployed with vSphere as a cloud provider. This feature supports
+- Volumes
+- Persistent Volumes
+- Storage Classes and provisioning of volumes.
+- vSphere Storage Policy Based Management for Containers orchestrated by Kubernetes.
+
+## Prerequisites
+
+You first need to configure your vSphere environment by following the [official documentation](https://kubernetes.io/docs/getting-started-guides/vsphere/#vsphere-cloud-provider).
+
+After this step you should have:
+- UUID activated for each VM where Kubernetes will be deployed
+- A vSphere account with required privileges
+
+## Kubespray configuration
+
+First you must define the cloud provider in `inventory/group_vars/all.yml` and set it to `vsphere`.
+```yml
+cloud_provider: vsphere
+```
+
+Then, in the same file, you need to declare your vCenter credentials following the description below.
From f4a3b3141508df665bfa6305b349a993e80ae680 Mon Sep 17 00:00:00 2001
From: nico
Date: Wed, 12 Jul 2017 11:01:06 +0200
Subject: [PATCH 30/36] add vsphere cloud provider doc

fix typo
---
 README.md       |  1 +
 docs/vsphere.md | 61 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+)
 create mode 100644 docs/vsphere.md

diff --git a/README.md b/README.md
index 60252cfba..df1d33bf9 100644
--- a/README.md
+++ b/README.md
@@ -34,6 +34,7 @@ To deploy the cluster you can use :
 * [OpenStack](docs/openstack.md)
 * [AWS](docs/aws.md)
 * [Azure](docs/azure.md)
+* [vSphere](docs/vsphere.md)
 * [Large deployments](docs/large-deployments.md)
 * [Upgrades basics](docs/upgrades.md)
 * [Roadmap](docs/roadmap.md)
diff --git a/docs/vsphere.md b/docs/vsphere.md
new file mode 100644
index 000000000..5374304c1
--- /dev/null
+++ b/docs/vsphere.md
@@ -0,0 +1,61 @@
+# vSphere cloud provider
+
+Kubespray can be deployed with vSphere as the cloud provider. This feature supports:
+- Volumes
+- Persistent Volumes
+- Storage Classes and provisioning of volumes.
+- vSphere Storage Policy Based Management for Containers orchestrated by Kubernetes.
+
+## Prerequisites
+
+You first need to configure your vSphere environment by following the [official documentation](https://kubernetes.io/docs/getting-started-guides/vsphere/#vsphere-cloud-provider).
+
+After this step you should have:
+- UUID activated for each VM where Kubernetes will be deployed
+- A vSphere account with required privileges
+
+## Kubespray configuration
+
+First, you must define the cloud provider in `inventory/group_vars/all.yml` and set it to `vsphere`.
+```yml
+cloud_provider: vsphere
+```
+
+Then, in the same file, you need to declare your vCenter credentials following the description below.
+
+| Variable                     | Required | Type    | Choices                    | Default | Comment                                                                                                                                                                                    |
+|------------------------------|----------|---------|----------------------------|---------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| vsphere_vcenter_ip           | TRUE     | string  |                            |         | IP/URL of the vCenter                                                                                                                                                                      |
+| vsphere_vcenter_port         | TRUE     | integer |                            |         | Port of the vCenter API. Commonly 443                                                                                                                                                      |
+| vsphere_insecure             | TRUE     | integer | 1, 0                       |         | Set to 1 if the host above uses a self-signed cert                                                                                                                                         |
+| vsphere_user                 | TRUE     | string  |                            |         | User name for vCenter with required privileges                                                                                                                                            |
+| vsphere_password             | TRUE     | string  |                            |         | Password for vCenter                                                                                                                                                                       |
+| vsphere_datacenter           | TRUE     | string  |                            |         | Datacenter name to use                                                                                                                                                                     |
+| vsphere_datastore            | TRUE     | string  |                            |         | Datastore name to use                                                                                                                                                                      |
+| vsphere_working_dir          | TRUE     | string  |                            |         | Working directory from the "VMs and Templates" view in vCenter where the VMs are placed                                                                                                    |
+| vsphere_scsi_controller_type | TRUE     | string  | buslogic, pvscsi, parallel | pvscsi  | SCSI controller name. Commonly "pvscsi".                                                                                                                                                   |
+| vsphere_vm_uuid              | FALSE    | string  |                            |         | VM instance UUID of the virtual machine that hosts the K8s master. Can be retrieved from the instanceUuid property in VmConfigInfo, as vc.uuid in the VMX file, or in `/sys/class/dmi/id/product_serial` |
+| vsphere_public_network       | FALSE    | string  |                            | Blank   | Name of the network the VMs are joined to                                                                                                                                                  |
+
+Example configuration
+```yml
+vsphere_vcenter_ip: "myvcenter.domain.com"
+vsphere_vcenter_port: 443
+vsphere_insecure: 1
+vsphere_user: "k8s@vsphere.local"
+vsphere_password: "K8s_admin"
+vsphere_datacenter: "DATACENTER_name"
+vsphere_datastore: "DATASTORE_name"
+vsphere_working_dir: "Docker_hosts"
+vsphere_scsi_controller_type: "pvscsi"
+```
+
+## Deployment
+
+Once the configuration is set, you can execute the playbook again to apply the new configuration:
+```
+cd kubespray
+ansible-playbook -i inventory/inventory.cfg -b -v cluster.yml
+```
+
+You'll find some useful examples [here](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/vsphere) to test your configuration.
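To exercise the dynamic provisioning this new doc describes, a minimal StorageClass can be created once the cluster is up. This is a sketch using the upstream `kubernetes.io/vsphere-volume` provisioner; the object name and the `diskformat` value are illustrative choices:

```yaml
# Minimal StorageClass sketch for testing dynamic provisioning with the
# vSphere cloud provider; the name and diskformat below are illustrative.
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: vsphere-standard
provisioner: kubernetes.io/vsphere-volume
parameters:
  diskformat: zeroedthick   # other common values: thin, eagerzeroedthick
```

A PersistentVolumeClaim referencing this class should then provision a VMDK in the configured datastore.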
From e0bf8b2aabb18a2715ca7bb481984d8727038d9c Mon Sep 17 00:00:00 2001
From: Brad Beam
Date: Wed, 12 Jul 2017 09:28:54 -0500
Subject: [PATCH 31/36] Adding recursive=true for rkt kubelet dir

Fixes #1434
---
 roles/kubernetes/node/templates/kubelet.rkt.service.j2 | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2
index 2c889d8c6..1f181a89d 100644
--- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2
@@ -30,7 +30,7 @@ ExecStart=/usr/bin/rkt run \
   --volume {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }},kind=host,source={{ dir }},readOnly=true \
 {% endfor -%}
   --volume var-lib-docker,kind=host,source={{ docker_daemon_graph }},readOnly=false \
-  --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false \
+  --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false,recursive=true \
   --volume var-log,kind=host,source=/var/log \
 {% if kube_network_plugin in ["calico", "weave", "canal"] %}
   --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \

From d1f58fed4c9a93c010fb3209decf4d764b72c8af Mon Sep 17 00:00:00 2001
From: Dann Bohn
Date: Fri, 14 Jul 2017 09:27:20 -0400
Subject: [PATCH 32/36] Template out known_users.csv, optionally add groups

---
 inventory/group_vars/k8s-cluster.yml                  | 3 +++
 roles/kubernetes/secrets/tasks/main.yml               | 6 ++----
 roles/kubernetes/secrets/templates/known_users.csv.j2 | 3 +++
 3 files changed, 8 insertions(+), 4 deletions(-)
 create mode 100644 roles/kubernetes/secrets/templates/known_users.csv.j2

diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml
index 65a8661d0..16ae6490e 100644
--- a/inventory/group_vars/k8s-cluster.yml
+++ b/inventory/group_vars/k8s-cluster.yml
@@ -39,6 +39,7 @@ kube_cert_group: kube-cert
 kube_log_level: 2

 # Users to create for basic auth in Kubernetes API via HTTP
+# Optionally add groups for user
 kube_api_pwd: "changeme"
 kube_users:
   kube:
@@ -47,6 +48,8 @@ kube_users:
   root:
     pass: "{{kube_api_pwd}}"
     role: admin
+    # groups:
+    #  - system:masters

diff --git a/roles/kubernetes/secrets/tasks/main.yml b/roles/kubernetes/secrets/tasks/main.yml
index fb4c38f38..5f55b775b 100644
--- a/roles/kubernetes/secrets/tasks/main.yml
+++ b/roles/kubernetes/secrets/tasks/main.yml
@@ -27,12 +27,10 @@
     group: "{{ kube_cert_group }}"

 - name: Populate users for basic auth in API
-  lineinfile:
+  template:
+    src: known_users.csv.j2
     dest: "{{ kube_users_dir }}/known_users.csv"
-    create: yes
-    line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
     backup: yes
-  with_dict: "{{ kube_users }}"
   when: inventory_hostname in "{{ groups['kube-master'] }}" and kube_basic_auth|default(true)
   notify: set secret_changed

diff --git a/roles/kubernetes/secrets/templates/known_users.csv.j2 b/roles/kubernetes/secrets/templates/known_users.csv.j2
new file mode 100644
index 000000000..3e792c52b
--- /dev/null
+++ b/roles/kubernetes/secrets/templates/known_users.csv.j2
@@ -0,0 +1,3 @@
+{% for user in kube_users %}
+{{kube_users[user].pass}},{{user}},{{kube_users[user].role}}{% if kube_users[user].groups is defined %},{% set groups_csv = kube_users[user].groups|join(',') -%}"{{groups_csv}}"{% endif %}
+{% endfor %}
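To see what the new template produces, take an inventory entry with the optional `groups` key uncommented (the password below is just the sample default from the patch):

```yaml
# Example input for known_users.csv.j2; the password is the sample default.
kube_users:
  root:
    pass: "changeme"
    role: admin
    groups:
      - system:masters
```

With this input the template renders the line `changeme,root,admin,"system:masters"`; a user without `groups` gets a plain three-column line, matching what the old `lineinfile` task wrote.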
From 3bdeaa4a6f86d575e588e5e676a290b3559f6ee4 Mon Sep 17 00:00:00 2001
From: John Ko
Date: Fri, 14 Jul 2017 15:25:09 -0400
Subject: [PATCH 33/36] fix typo 'on' > 'one'

---
 RELEASE.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/RELEASE.md b/RELEASE.md
index 0679667a1..e46f451d9 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -3,7 +3,7 @@
 The Kubespray Project is released on an as-needed basis. The process is as follows:

 1. An issue is proposing a new release with a changelog since the last release
-2. At least on of the [OWNERS](OWNERS) must LGTM this release
+2. At least one of the [OWNERS](OWNERS) must LGTM this release
 3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
 4. The release issue is closed
 5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`

From 5145a8e8befddbfcbfa79f35bbbd2f68118fbd04 Mon Sep 17 00:00:00 2001
From: Ubuntu
Date: Sun, 16 Jul 2017 20:52:13 +0000
Subject: [PATCH 34/36] higher draining timeouts

---
 roles/upgrade/pre-upgrade/defaults/main.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/roles/upgrade/pre-upgrade/defaults/main.yml b/roles/upgrade/pre-upgrade/defaults/main.yml
index 5980360fc..c87b7e9ea 100644
--- a/roles/upgrade/pre-upgrade/defaults/main.yml
+++ b/roles/upgrade/pre-upgrade/defaults/main.yml
@@ -1,3 +1,3 @@
-drain_grace_period: 30
-drain_timeout: 40s
+drain_grace_period: 90
+drain_timeout: 120s
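These defaults feed the drain step of the pre-upgrade role. A task along the following lines shows how the values typically map onto `kubectl drain` flags; this is a sketch, and the actual task in the role may use different options:

```yaml
# Hedged sketch of how drain_grace_period and drain_timeout are typically
# consumed; the real task in roles/upgrade/pre-upgrade may differ.
- name: Drain node prior to upgrade
  command: >-
    {{ bin_dir }}/kubectl drain
    --force
    --ignore-daemonsets
    --grace-period {{ drain_grace_period }}
    --timeout {{ drain_timeout }}
    {{ inventory_hostname }}
  delegate_to: "{{ groups['kube-master'][0] }}"
```

Raising the grace period to 90s and the overall timeout to 120s gives pods with slow shutdown hooks more room to terminate cleanly before an upgrade proceeds.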
From 06b219217b8dd13943f87e7b5faeb6789625f31d Mon Sep 17 00:00:00 2001
From: John Ko
Date: Tue, 18 Jul 2017 10:44:08 -0400
Subject: [PATCH 35/36] fix some typos in HA doc

---
 docs/ha-mode.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/ha-mode.md b/docs/ha-mode.md
index 5036345b7..b7ec9ab74 100644
--- a/docs/ha-mode.md
+++ b/docs/ha-mode.md
@@ -12,7 +12,7 @@ Etcd
 ----

 The `etcd_access_endpoint` fact provides an access pattern for clients. And the
-`etcd_multiaccess` (defaults to `True`) group var controlls that behavior.
+`etcd_multiaccess` (defaults to `True`) group var controls that behavior.
 It makes deployed components to access the etcd cluster members directly:
 `http://ip1:2379, http://ip2:2379,...`. This mode assumes the clients do a
 loadbalancing and handle HA for connections.
@@ -28,9 +28,9 @@
 is less efficient than a dedicated load balancer because it creates extra
 health checks on the Kubernetes apiserver, but is more practical for scenarios
 where an external LB or virtual IP management is inconvenient. This option is
 configured by the variable `loadbalancer_apiserver_localhost` (defaults to `True`).
-You may also define the port the local internal loadbalancer users by changing,
+You may also define the port the local internal loadbalancer uses by changing
 `nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`.
-It is also import to note that Kubespray will only configure kubelet and kube-proxy
+It is also important to note that Kubespray will only configure kubelet and kube-proxy
 on non-master nodes to use the local internal loadbalancer.

 If you choose to NOT use the local internal loadbalancer, you will need to configure

From 018b5039e78691d5fbda581f47ff307bad30e0e6 Mon Sep 17 00:00:00 2001
From: John Ko
Date: Thu, 20 Jul 2017 10:27:05 -0400
Subject: [PATCH 36/36] set loadbalancer_apiserver_localhost default true

to match this
https://github.com/kubernetes-incubator/kubespray/blob/master/roles/kubernetes/node/tasks/main.yml#L20
and the documented behaviour in HA docs
related to #1456
@rsmitty
---
 roles/kubernetes/preinstall/tasks/set_facts.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/roles/kubernetes/preinstall/tasks/set_facts.yml b/roles/kubernetes/preinstall/tasks/set_facts.yml
index 03057829d..b154c96f8 100644
--- a/roles/kubernetes/preinstall/tasks/set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/set_facts.yml
@@ -20,7 +20,7 @@

 - set_fact:
     kube_apiserver_endpoint: |-
-      {% if not is_kube_master and loadbalancer_apiserver_localhost|default(false) -%}
+      {% if not is_kube_master and loadbalancer_apiserver_localhost|default(true) -%}
       https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
       {%- elif is_kube_master -%}
       http://127.0.0.1:{{ kube_apiserver_insecure_port }}
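With the default flipped to `true`, the fact resolves differently per node role. Assuming the stock ports (these are the usual defaults, not guaranteed for every inventory), the outcome looks like this:

```yaml
# Illustrative resolution of kube_apiserver_endpoint after this change,
# assuming kube_apiserver_port: 6443 and kube_apiserver_insecure_port: 8080.
#
# non-master node (loadbalancer_apiserver_localhost now defaults to true):
#   kube_apiserver_endpoint: https://localhost:6443    # local nginx proxy
# master node:
#   kube_apiserver_endpoint: http://127.0.0.1:8080     # local insecure port
```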