From 5e0249ae7cc486430d2d1d96cde80e087ff29f23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kr=C3=BCger?= Date: Wed, 10 Apr 2019 14:56:18 +0200 Subject: [PATCH] Add HAProxy as internal loadbalancer (#4480) --- docs/ha-mode.md | 4 +- inventory/sample/group_vars/all/all.yml | 10 +++-- roles/download/defaults/main.yml | 14 +++++- roles/kubernetes/node/defaults/main.yml | 6 +-- roles/kubernetes/node/tasks/haproxy.yml | 25 +++++++++++ roles/kubernetes/node/tasks/main.yml | 7 ++- roles/kubernetes/node/tasks/nginx-proxy.yml | 7 ++- .../kubernetes/node/templates/haproxy.cfg.j2 | 43 +++++++++++++++++++ .../templates/manifests/haproxy.manifest.j2 | 43 +++++++++++++++++++ .../manifests/nginx-proxy.manifest.j2 | 10 ++--- roles/kubernetes/node/templates/nginx.conf.j2 | 8 ++-- roles/kubespray-defaults/defaults/main.yaml | 6 ++- roles/network_plugin/contiv/tasks/main.yml | 2 +- tests/files/gce_ubuntu-flannel-ha.yml | 1 + 14 files changed, 163 insertions(+), 23 deletions(-) create mode 100644 roles/kubernetes/node/tasks/haproxy.yml create mode 100644 roles/kubernetes/node/templates/haproxy.cfg.j2 create mode 100644 roles/kubernetes/node/templates/manifests/haproxy.manifest.j2 diff --git a/docs/ha-mode.md b/docs/ha-mode.md index 0c9f54ef9..cb83801ea 100644 --- a/docs/ha-mode.md +++ b/docs/ha-mode.md @@ -24,7 +24,7 @@ where an external LB or virtual IP management is inconvenient. This option is configured by the variable `loadbalancer_apiserver_localhost` (defaults to `True`. Or `False`, if there is an external `loadbalancer_apiserver` defined). You may also define the port the local internal loadbalancer uses by changing, -`nginx_kube_apiserver_port`. This defaults to the value of +`loadbalancer_apiserver_port`. This defaults to the value of `kube_apiserver_port`. It is also important to note that Kubespray will only configure kubelet and kube-proxy on non-master nodes to use the local internal loadbalancer. 
@@ -114,7 +114,7 @@ Where: * `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray; * `lc` - localhost; * `bip` - a custom bind IP or localhost for the default bind IP '0.0.0.0'; -* `nsp` - nginx secure port, `nginx_kube_apiserver_port`, defers to `sp`; +* `nsp` - nginx secure port, `loadbalancer_apiserver_port`, defers to `sp`; * `sp` - secure port, `kube_apiserver_port`; * `lp` - LB port, `loadbalancer_apiserver.port`, defers to the secure port; * `ip` - the node IP, defers to the ansible IP; diff --git a/inventory/sample/group_vars/all/all.yml b/inventory/sample/group_vars/all/all.yml index e7c62e932..663277266 100644 --- a/inventory/sample/group_vars/all/all.yml +++ b/inventory/sample/group_vars/all/all.yml @@ -20,13 +20,15 @@ bin_dir: /usr/local/bin # port: 1234 ## Internal loadbalancers for apiservers -# loadbalancer_apiserver_localhost: true +loadbalancer_apiserver_localhost: true +loadbalancer_apiserver_type: haproxy ## Local loadbalancer should use this port ## And must be set port 6443 -nginx_kube_apiserver_port: 6443 -## If nginx_kube_apiserver_healthcheck_port variable defined, enables proxy liveness check. -nginx_kube_apiserver_healthcheck_port: 8081 +loadbalancer_apiserver_port: 6443 + +## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx. +loadbalancer_apiserver_healthcheck_port: 8081 ### OTHER OPTIONAL VARIABLES ## For some things, kubelet needs to load kernel modules. 
For example, dynamic kernel services are needed diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index cf038e46b..42a0fed06 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -202,6 +202,9 @@ multus_image_tag: "{{ multus_version }}" nginx_image_repo: nginx nginx_image_tag: 1.15 +haproxy_image_repo: haproxy +haproxy_image_tag: 1.9 + coredns_version: "1.4.0" coredns_image_repo: "coredns/coredns" coredns_image_tag: "{{ coredns_version }}" @@ -485,7 +488,7 @@ downloads: - k8s-cluster nginx: - enabled: "{{ loadbalancer_apiserver_localhost }}" + enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'nginx' }}" container: true repo: "{{ nginx_image_repo }}" tag: "{{ nginx_image_tag }}" @@ -493,6 +496,15 @@ downloads: groups: - kube-node + haproxy: + enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'haproxy' }}" + container: true + repo: "{{ haproxy_image_repo }}" + tag: "{{ haproxy_image_tag }}" + sha256: "{{ haproxy_digest_checksum|default(None) }}" + groups: + - kube-node + coredns: enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}" container: true diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index c802ab91e..7eb45c89b 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -42,9 +42,9 @@ kube_master_cpu_reserved: 200m kubelet_status_update_frequency: 10s -# Requests for nginx load balancer app -nginx_memory_requests: 32M -nginx_cpu_requests: 25m +# Requests for load balancer app +loadbalancer_apiserver_memory_requests: 32M +loadbalancer_apiserver_cpu_requests: 25m # kube_api_runtime_config: # - extensions/v1beta1/daemonsets=true diff --git a/roles/kubernetes/node/tasks/haproxy.yml b/roles/kubernetes/node/tasks/haproxy.yml new file mode 100644 index 000000000..ed899a3f4 --- /dev/null +++ b/roles/kubernetes/node/tasks/haproxy.yml @@ -0,0 +1,25 
@@ +--- +- name: haproxy | Cleanup potentially deployed nginx-proxy + file: + path: "{{ kube_manifest_dir }}/nginx-proxy.yml" + state: absent + +- name: haproxy | Write static pod + template: + src: manifests/haproxy.manifest.j2 + dest: "{{ kube_manifest_dir }}/haproxy.yml" + +- name: haproxy | Make haproxy directory + file: + path: "{{ haproxy_config_dir }}" + state: directory + mode: 0700 + owner: root + +- name: haproxy | Write haproxy configuration + template: + src: haproxy.cfg.j2 + dest: "{{ haproxy_config_dir }}/haproxy.cfg" + owner: root + mode: 0755 + backup: yes diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index 24894848a..ef0be57ba 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -18,10 +18,15 @@ - kubelet - import_tasks: nginx-proxy.yml - when: is_kube_master == false and loadbalancer_apiserver_localhost + when: is_kube_master == false and loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'nginx' tags: - nginx +- import_tasks: haproxy.yml + when: is_kube_master == false and loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'haproxy' + tags: + - haproxy + - name: Make sure dynamic kubelet configuration directory is writeable file: path: "{{ dynamic_kubelet_configuration_dir }}" diff --git a/roles/kubernetes/node/tasks/nginx-proxy.yml b/roles/kubernetes/node/tasks/nginx-proxy.yml index 4b3b5f2f5..36c1d7306 100644 --- a/roles/kubernetes/node/tasks/nginx-proxy.yml +++ b/roles/kubernetes/node/tasks/nginx-proxy.yml @@ -1,8 +1,13 @@ --- +- name: haproxy | Cleanup potentially deployed haproxy + file: + path: "{{ kube_manifest_dir }}/haproxy.yml" + state: absent + - name: nginx-proxy | Write static pod template: src: manifests/nginx-proxy.manifest.j2 - dest: "{{kube_manifest_dir}}/nginx-proxy.yml" + dest: "{{ kube_manifest_dir }}/nginx-proxy.yml" - name: nginx-proxy | Make nginx directory file: diff --git 
a/roles/kubernetes/node/templates/haproxy.cfg.j2 b/roles/kubernetes/node/templates/haproxy.cfg.j2 new file mode 100644 index 000000000..76466b008 --- /dev/null +++ b/roles/kubernetes/node/templates/haproxy.cfg.j2 @@ -0,0 +1,43 @@ +global + maxconn 4000 + log 127.0.0.1 local0 + +defaults + mode http + log global + option httplog + option dontlognull + option http-server-close + option redispatch + retries 5 + timeout http-request 5m + timeout queue 5m + timeout connect 30s + timeout client 15m + timeout server 15m + timeout http-keep-alive 30s + timeout check 30s + maxconn 4000 + +{% if loadbalancer_apiserver_healthcheck_port is defined -%} +frontend healthz + bind *:{{ loadbalancer_apiserver_healthcheck_port }} + mode http + monitor-uri /healthz +{% endif %} + +frontend kube_api_frontend + bind *:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} + mode tcp + option tcplog + default_backend kube_api_backend + +backend kube_api_backend + mode tcp + balance leastconn + default-server inter 15s downinter 15s rise 2 fall 2 slowstart 60s maxconn 1000 maxqueue 256 weight 100 + option httpchk GET /healthz + http-check expect status 200 + {% for host in groups['kube-master'] -%} + server {{ host }} {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}:{{ kube_apiserver_port }} check check-ssl verify none + {% endfor -%} diff --git a/roles/kubernetes/node/templates/manifests/haproxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/haproxy.manifest.j2 new file mode 100644 index 000000000..e0cca903f --- /dev/null +++ b/roles/kubernetes/node/templates/manifests/haproxy.manifest.j2 @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: Pod +metadata: + name: haproxy + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: kube-haproxy +spec: + hostNetwork: true + nodeSelector: + beta.kubernetes.io/os: linux +{% if kube_version is version('v1.11.1', '>=') %} + priorityClassName: system-node-critical 
+{% endif %} + containers: + - name: haproxy + image: {{ haproxy_image_repo }}:{{ haproxy_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + requests: + cpu: {{ loadbalancer_apiserver_cpu_requests }} + memory: {{ loadbalancer_apiserver_memory_requests }} + securityContext: + privileged: true + {% if loadbalancer_apiserver_healthcheck_port is defined -%} + livenessProbe: + httpGet: + path: /healthz + port: {{ loadbalancer_apiserver_healthcheck_port }} + readinessProbe: + httpGet: + path: /healthz + port: {{ loadbalancer_apiserver_healthcheck_port }} + {% endif -%} + volumeMounts: + - mountPath: /usr/local/etc/haproxy/ + name: etc-haproxy + readOnly: true + volumes: + - name: etc-haproxy + hostPath: + path: {{ haproxy_config_dir }} diff --git a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 index ed52f647e..18e85b3fa 100644 --- a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 @@ -19,19 +19,19 @@ spec: imagePullPolicy: {{ k8s_image_pull_policy }} resources: requests: - cpu: {{ nginx_cpu_requests }} - memory: {{ nginx_memory_requests }} + cpu: {{ loadbalancer_apiserver_cpu_requests }} + memory: {{ loadbalancer_apiserver_memory_requests }} securityContext: privileged: true - {% if nginx_kube_apiserver_healthcheck_port is defined -%} + {% if loadbalancer_apiserver_healthcheck_port is defined -%} livenessProbe: httpGet: path: /healthz - port: {{ nginx_kube_apiserver_healthcheck_port }} + port: {{ loadbalancer_apiserver_healthcheck_port }} readinessProbe: httpGet: path: /healthz - port: {{ nginx_kube_apiserver_healthcheck_port }} + port: {{ loadbalancer_apiserver_healthcheck_port }} {% endif -%} volumeMounts: - mountPath: /etc/nginx diff --git a/roles/kubernetes/node/templates/nginx.conf.j2 b/roles/kubernetes/node/templates/nginx.conf.j2 index 274139529..0c869d94a 100644 --- 
a/roles/kubernetes/node/templates/nginx.conf.j2 +++ b/roles/kubernetes/node/templates/nginx.conf.j2 @@ -19,7 +19,7 @@ stream { } server { - listen 127.0.0.1:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}; + listen {{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}; proxy_pass kube_apiserver; proxy_timeout 10m; proxy_connect_timeout 1s; @@ -38,13 +38,13 @@ http { server_tokens off; autoindex off; - {% if nginx_kube_apiserver_healthcheck_port is defined -%} + {% if loadbalancer_apiserver_healthcheck_port is defined -%} server { - listen {{ nginx_kube_apiserver_healthcheck_port }}; + listen {{ loadbalancer_apiserver_healthcheck_port }}; location /healthz { access_log off; return 200; } } - {% endif -%} + {% endif %} } diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index c6407cb32..0d59873b3 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -37,6 +37,9 @@ ignore_assert_errors: false # nginx-proxy configure nginx_config_dir: "/etc/nginx" +# haproxy configure +haproxy_config_dir: "/etc/haproxy" + # Directory where the binaries will be installed bin_dir: /usr/local/bin docker_bin_dir: /usr/bin @@ -415,13 +418,14 @@ kube_apiserver_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}" kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}" first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(fallback_ips[groups['kube-master'][0]])) }}" loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}" +loadbalancer_apiserver_type: "nginx" # applied if only external loadbalancer_apiserver is defined, otherwise ignored apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local" kube_apiserver_endpoint: |- {% if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%} https://{{ 
apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }} {%- elif not is_kube_master and loadbalancer_apiserver_localhost -%} - https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }} + https://localhost:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} {%- elif is_kube_master -%} https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_port }} {%- else -%} diff --git a/roles/network_plugin/contiv/tasks/main.yml b/roles/network_plugin/contiv/tasks/main.yml index a5be03fac..d626cbd68 100644 --- a/roles/network_plugin/contiv/tasks/main.yml +++ b/roles/network_plugin/contiv/tasks/main.yml @@ -22,7 +22,7 @@ set_fact: kube_apiserver_endpoint_for_contiv: |- {% if not is_kube_master and loadbalancer_apiserver_localhost -%} - https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }} + https://localhost:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} {%- elif loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%} https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }} {%- if loadbalancer_apiserver.port|string != "443" -%} diff --git a/tests/files/gce_ubuntu-flannel-ha.yml b/tests/files/gce_ubuntu-flannel-ha.yml index 54aeac1c8..4057b27fa 100644 --- a/tests/files/gce_ubuntu-flannel-ha.yml +++ b/tests/files/gce_ubuntu-flannel-ha.yml @@ -12,3 +12,4 @@ skip_non_kubeadm_warning: true deploy_netchecker: true dns_min_replicas: 1 cloud_provider: gce +loadbalancer_apiserver_type: haproxy