From c91a3183d38e6a13bfd0596cd0e873a9e2376430 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Tue, 15 Dec 2015 16:51:55 +0100 Subject: [PATCH 1/4] manage undefined vars for loadbalancing --- roles/dnsmasq/tasks/main.yml | 2 +- .../kubernetes/node/templates/manifests/kube-proxy.manifest.j2 | 2 +- roles/kubernetes/node/templates/openssl.conf.j2 | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml index 87c056167..b3585f47a 100644 --- a/roles/dnsmasq/tasks/main.yml +++ b/roles/dnsmasq/tasks/main.yml @@ -14,7 +14,7 @@ regexp: ".*{{ apiserver_loadbalancer_domain_name }}$" line: "{{ loadbalancer_apiserver.address }} lb-apiserver.kubernetes.local" state: present - when: loadbalancer_apiserver is defined + when: loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined - name: clean hosts file lineinfile: diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index 923c29764..3c429ec07 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -15,7 +15,7 @@ spec: {% if inventory_hostname in groups['kube-master'] %} - --master=http://127.0.0.1:8080 {% else %} -{% if loadbalancer_apiserver.address is defined | default('') %} +{% if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %} - --master=https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port }} {% else %} - --master=https://{{ groups['kube-master'][0] }}:{{ kube_apiserver_port }} diff --git a/roles/kubernetes/node/templates/openssl.conf.j2 b/roles/kubernetes/node/templates/openssl.conf.j2 index 3715d97b8..c594e3337 100644 --- a/roles/kubernetes/node/templates/openssl.conf.j2 +++ b/roles/kubernetes/node/templates/openssl.conf.j2 @@ -10,7 +10,9 @@ subjectAltName = @alt_names 
DNS.1 = kubernetes DNS.2 = kubernetes.default DNS.3 = kubernetes.default.svc.{{ dns_domain }} +{% if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %} DNS.4 = {{ apiserver_loadbalancer_domain_name }} +{% endif %} {% for host in groups['kube-master'] %} IP.{{ loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} {% endfor %} From 9649f2779d1131429211fc00bd92e14559ef5d8d Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Tue, 15 Dec 2015 17:01:29 +0100 Subject: [PATCH 2/4] Commenting out loadbalancing vars --- environments/test/group_vars/all.yml | 6 +++--- environments/test/group_vars/new-york.yml | 20 ++++++++++---------- environments/test/group_vars/paris.yml | 20 ++++++++++---------- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/environments/test/group_vars/all.yml b/environments/test/group_vars/all.yml index 35ae21a0a..41c87a57d 100644 --- a/environments/test/group_vars/all.yml +++ b/environments/test/group_vars/all.yml @@ -73,8 +73,8 @@ dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address # For multi masters architecture: # kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer # This domain name will be inserted into the /etc/hosts file of all servers -# configurationexample with haproxy : -# lissten kubernetes-apiserver-https +# configuration example with haproxy : +# listen kubernetes-apiserver-https # bind 10.99.0.21:8383 # option ssl-hello-chk # mode tcp @@ -83,4 +83,4 @@ dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address # server master1 10.99.0.26:443 # server master2 10.99.0.27:443 # balance roundrobin -apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local" +# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local" diff --git a/environments/test/group_vars/new-york.yml 
b/environments/test/group_vars/new-york.yml index 5b51961db..ce9d95396 100644 --- a/environments/test/group_vars/new-york.yml +++ b/environments/test/group_vars/new-york.yml @@ -1,10 +1,10 @@ ---- -peers: - -router_id: "10.99.0.34" - as: "65xxx" - - router_id: "10.99.0.35" - as: "65xxx" - -loadbalancer_apiserver: - address: "10.99.0.44" - port: "8383" +#--- +#peers: +# -router_id: "10.99.0.34" +# as: "65xxx" +# - router_id: "10.99.0.35" +# as: "65xxx" +# +#loadbalancer_apiserver: +# address: "10.99.0.44" +# port: "8383" diff --git a/environments/test/group_vars/paris.yml b/environments/test/group_vars/paris.yml index 052200ba6..e8b34ae0d 100644 --- a/environments/test/group_vars/paris.yml +++ b/environments/test/group_vars/paris.yml @@ -1,10 +1,10 @@ ---- -peers: - -router_id: "10.99.0.2" - as: "65xxx" - - router_id: "10.99.0.3" - as: "65xxx" - -loadbalancer_apiserver: - address: "10.99.0.21" - port: "8383" +#--- +#peers: +# -router_id: "10.99.0.2" +# as: "65xxx" +# - router_id: "10.99.0.3" +# as: "65xxx" +# +#loadbalancer_apiserver: +# address: "10.99.0.21" +# port: "8383" From f2069b296c6767324ce78ad5e3476f37b5f7ce94 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Tue, 15 Dec 2015 17:16:19 +0100 Subject: [PATCH 3/4] BGP peering and loadbalancing vars are managed in a group_vars file --- README.md | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index c0f380039..9e810477f 100644 --- a/README.md +++ b/README.md @@ -106,14 +106,6 @@ In node-mesh mode the nodes peers with all the nodes in order to exchange routes [k8s-cluster:children] kube-node kube-master - -[paris:vars] -peers=[{"router_id": "10.99.0.2", "as": "65xxx"}, {"router_id": "10.99.0.3", "as": "65xxx"}] -loadbalancer_address="10.99.0.24" - -[usa:vars] -peers=[{"router_id": "10.99.0.34", "as": "65xxx"}, {"router_id": "10.99.0.35", "as": "65xxx"}] -loadbalancer_address="10.99.0.44" ``` ### Playbook @@ -161,6 +153,9 @@ the server address has to be 
present on both groups 'kube-master' and 'kube-node * Almost all kubernetes components are running into pods except *kubelet*. These pods are managed by kubelet which ensure they're always running * One etcd cluster member per node will be configured. For safety reasons, you should have at least two master nodes. + +* Kube-proxy doesn't support multiple apiservers on startup ([#18174](https://github.com/kubernetes/kubernetes/issues/18174)). An external loadbalancer needs to be configured. +In order to do so, two variables have to be set: '**loadbalancer_apiserver**' and '**apiserver_loadbalancer_domain_name**' ### Network Overlay From 61bb6468ef8acfd49169302a2883fff074646afc Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Tue, 15 Dec 2015 17:24:37 +0100 Subject: [PATCH 4/4] Update README, cluster.yml --- README.md | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 9e810477f..89c4771c7 100644 --- a/README.md +++ b/README.md @@ -48,7 +48,7 @@ kube-master Run the playbook ``` -ansible-playbook -i environments/production/inventory cluster.yml -u root +ansible-playbook -i environments/test/inventory cluster.yml -u root ``` You can jump directly to "*Available apps, installation procedure*" @@ -59,7 +59,7 @@ Ansible ### Download binaries A role allows to download required binaries. They will be stored in a directory defined by the variable **'local_release_dir'** (by default /tmp). -Please ensure that you have enough disk space there (about **1G**). +Please ensure that you have enough disk space there (about **300M**). **Note**: Whenever you'll need to change the version of a software, you'll have to erase the content of this directory.
@@ -116,13 +116,9 @@ kube-master roles: - { role: download, tags: download } -# etcd must be running on master(s) before going on -- hosts: etcd - roles: - - { role: etcd, tags: etcd } - - hosts: k8s-cluster roles: + - { role: etcd, tags: etcd } - { role: docker, tags: docker } - { role: dnsmasq, tags: dnsmasq } - { role: network_plugin, tags: ['calico', 'flannel', 'network'] }