From f3a4c31e666cbf27b9e432782bd6618017d2a075 Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Thu, 15 Jun 2017 18:15:52 +0800 Subject: [PATCH 01/22] add kube-node to system:nodes group, add system:kube-proxy cert for kube-proxy --- roles/kubernetes/node/tasks/main.yml | 7 +++++-- .../templates/kube-proxy-kubeconfig.yaml.j2 | 18 ++++++++++++++++++ .../templates/manifests/kube-proxy.manifest.j2 | 6 +++--- roles/kubernetes/secrets/files/make-ssl.sh | 11 +++++++++-- .../secrets/tasks/gen_certs_script.yml | 9 ++++++++- 5 files changed, 43 insertions(+), 8 deletions(-) create mode 100644 roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index f09845f76..e0558f8cd 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -30,9 +30,12 @@ - name: write the kubecfg (auth) file for kubelet template: - src: node-kubeconfig.yaml.j2 - dest: "{{ kube_config_dir }}/node-kubeconfig.yaml" + src: "{{ item }}-kubeconfig.yaml.j2" + dest: "{{ kube_config_dir }}/{{ item }}-kubeconfig.yaml" backup: yes + with_items: + - node + - kube-proxy notify: restart kubelet tags: kubelet diff --git a/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 b/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 new file mode 100644 index 000000000..cd305b493 --- /dev/null +++ b/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: {{ kube_cert_dir }}/ca.pem + server: {{ kube_apiserver_endpoint }} +users: +- name: kube-proxy + user: + client-certificate: {{ kube_cert_dir }}/kube-proxy.pem + client-key: {{ kube_cert_dir }}/kube-proxy-key.pem +contexts: +- context: + cluster: local + user: kube-proxy + name: kube-proxy-{{ cluster_name }} +current-context: kube-proxy-{{ cluster_name }} diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index 9b7d53857..d584bdd7d 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -27,7 +27,7 @@ spec: - --v={{ kube_log_level }} - --master={{ kube_apiserver_endpoint }} {% if not is_kube_master %} - - --kubeconfig={{kube_config_dir}}/node-kubeconfig.yaml + - --kubeconfig={{kube_config_dir}}/kube-proxy-kubeconfig.yaml {% endif %} - --bind-address={{ ip | default(ansible_default_ipv4.address) }} - --cluster-cidr={{ kube_pods_subnet }} @@ -41,7 +41,7 @@ spec: - mountPath: /etc/ssl/certs name: ssl-certs-host readOnly: true - - mountPath: {{kube_config_dir}}/node-kubeconfig.yaml + - mountPath: {{kube_config_dir}}/kube-proxy-kubeconfig.yaml name: "kubeconfig" readOnly: true - mountPath: {{kube_config_dir}}/ssl @@ -60,7 +60,7 @@ spec: {% endif %} - name: "kubeconfig" hostPath: - path: "{{kube_config_dir}}/node-kubeconfig.yaml" + path: "{{kube_config_dir}}/kube-proxy-kubeconfig.yaml" - name: "etc-kube-ssl" hostPath: path: "{{kube_config_dir}}/ssl" diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh index 55ea13d1e..8fec4f314 100755 --- a/roles/kubernetes/secrets/files/make-ssl.sh +++ b/roles/kubernetes/secrets/files/make-ssl.sh @@ -80,6 +80,7 @@ if [ ! 
-e "$SSLDIR/ca-key.pem" ]; then cat ca.pem >> apiserver.pem fi +# Admins if [ -n "$MASTERS" ]; then for host in $MASTERS; do cn="${host%%.*}" @@ -90,16 +91,22 @@ if [ -n "$MASTERS" ]; then done fi -# Nodes and Admin +# Nodes if [ -n "$HOSTS" ]; then for host in $HOSTS; do cn="${host%%.*}" # node key openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1 - openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=kube-node-${cn}" > /dev/null 2>&1 + openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=kube-node-${cn}/O=system:nodes" > /dev/null 2>&1 openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 3650 > /dev/null 2>&1 done fi +# system:kube-proxy +openssl genrsa -out kube-proxy-key.pem 2048 > /dev/null 2>&1 +openssl req -new -key kube-proxy-key.pem -out kube-proxy.csr -subj "/CN=system:kube-proxy" > /dev/null 2>&1 +openssl x509 -req -in kube-proxy.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out kube-proxy.pem -days 3650 > /dev/null 2>&1 + + # Install certs mv *.pem ${SSLDIR}/ diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index 8df2195bf..0629e3ea5 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -69,11 +69,18 @@ 'apiserver-key.pem' ] all_node_certs: "['ca.pem', + 'kube-proxy.pem', + 'kube-proxy-key.pem', {% for node in groups['k8s-cluster'] %} 'node-{{ node }}.pem', 'node-{{ node }}-key.pem', {% endfor %}]" - my_node_certs: ['ca.pem', 'node-{{ inventory_hostname }}.pem', 'node-{{ inventory_hostname }}-key.pem'] + my_node_certs: ['ca.pem', + 'kube-proxy.pem', + 'kube-proxy-key.pem', + 'node-{{ inventory_hostname }}.pem', + 'node-{{ inventory_hostname }}-key.pem' + ] tags: facts - name: Gen_certs | Gather master certs From 8b58394d8c9e0d394e784b558e6f8d1cd623de93 Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Thu, 15 Jun 2017 19:20:58 +0800 Subject: [PATCH 02/22] seperate kube-proxy certs for each node --- .../node/templates/kube-proxy-kubeconfig.yaml.j2 | 4 ++-- roles/kubernetes/secrets/files/make-ssl.sh | 12 +++++++++--- roles/kubernetes/secrets/tasks/gen_certs_script.yml | 10 +++++----- 3 files changed, 16 insertions(+), 10 deletions(-) diff --git a/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 b/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 index cd305b493..18c47cd3e 100644 --- a/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 +++ b/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 @@ -8,8 +8,8 @@ clusters: users: - name: kube-proxy user: - client-certificate: {{ kube_cert_dir }}/kube-proxy.pem - client-key: {{ kube_cert_dir }}/kube-proxy-key.pem + client-certificate: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}.pem + client-key: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}-key.pem contexts: - context: cluster: local diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh index 8fec4f314..dde5873fb 100755 --- a/roles/kubernetes/secrets/files/make-ssl.sh +++ b/roles/kubernetes/secrets/files/make-ssl.sh @@ -103,9 +103,15 @@ if [ -n "$HOSTS" ]; then fi # system:kube-proxy -openssl genrsa -out kube-proxy-key.pem 2048 > /dev/null 2>&1 -openssl req -new -key kube-proxy-key.pem -out kube-proxy.csr -subj "/CN=system:kube-proxy" > /dev/null 2>&1 -openssl x509 -req -in kube-proxy.csr -CA ca.pem 
-CAkey ca-key.pem -CAcreateserial -out kube-proxy.pem -days 3650 > /dev/null 2>&1 +if [ -n "$HOSTS" ]; then + for host in $HOSTS; do + cn="${host%%.*}" + # kube-proxy key + openssl genrsa -out kube-proxy-${host}-key.pem 2048 > /dev/null 2>&1 + openssl req -new -key kube-proxy-${host}-key.pem -out kube-proxy-${host}.csr -subj "/CN=system:kube-proxy" > /dev/null 2>&1 + openssl x509 -req -in kube-proxy-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out kube-proxy-${host}.pem -days 3650 > /dev/null 2>&1 + done +fi # Install certs diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index 0629e3ea5..1920b696b 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -69,17 +69,17 @@ 'apiserver-key.pem' ] all_node_certs: "['ca.pem', - 'kube-proxy.pem', - 'kube-proxy-key.pem', {% for node in groups['k8s-cluster'] %} 'node-{{ node }}.pem', 'node-{{ node }}-key.pem', + 'kube-proxy-{{ node }}.pem', + 'kube-proxy-{{ node }}-key.pem', {% endfor %}]" my_node_certs: ['ca.pem', - 'kube-proxy.pem', - 'kube-proxy-key.pem', 'node-{{ inventory_hostname }}.pem', - 'node-{{ inventory_hostname }}-key.pem' + 'node-{{ inventory_hostname }}-key.pem', + 'kube-proxy-{{ inventory_hostname }}.pem', + 'kube-proxy-{{ inventory_hostname }}-key.pem', ] tags: facts From 0ee229488ed1e0c30fa4f7e4ff9f97d91ecf938c Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Fri, 16 Jun 2017 14:21:21 +0800 Subject: [PATCH 03/22] certs for system:kube-controller-manager system:kube-scheduler --- roles/kubernetes/master/tasks/main.yml | 22 +++++++++--- ...kube-controller-manager-kubeconfig.yaml.j2 | 18 ++++++++++ .../kube-scheduler-kubeconfig.yaml.j2 | 18 ++++++++++ .../kube-controller-manager.manifest.j2 | 32 ++++++++++++----- .../manifests/kube-scheduler.manifest.j2 | 26 +++++++++++++- .../manifests/kube-proxy.manifest.j2 | 22 ++++++------ roles/kubernetes/secrets/files/make-ssl.sh | 36 ++++++++++--------- .../secrets/tasks/gen_certs_script.yml | 14 ++++++-- 8 files changed, 143 insertions(+), 45 deletions(-) create mode 100644 roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2 create mode 100644 roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2 diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml index dadef4bf5..6922e6a51 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -60,12 +60,11 @@ when: kubesystem|failed and inventory_hostname == groups['kube-master'][0] tags: apps -- name: Write kube-controller-manager manifest +- name: Write kube-scheduler kubeconfig template: - src: manifests/kube-controller-manager.manifest.j2 - dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest" - notify: Master | wait for kube-controller-manager - tags: kube-controller-manager + src: kube-scheduler-kubeconfig.yaml.j2 + dest: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml" + tags: kube-scheduler - name: Write kube-scheduler manifest template: @@ -74,6 +73,19 @@ notify: Master | wait for kube-scheduler tags: kube-scheduler +- name: Write kube-controller-manager kubeconfig + template: + src: kube-controller-manager-kubeconfig.yaml.j2 + dest: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml" + tags: kube-controller-manager + +- name: Write kube-controller-manager manifest + template: + src: manifests/kube-controller-manager.manifest.j2 + 
dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest" + notify: Master | wait for kube-controller-manager + tags: kube-controller-manager + - include: post-upgrade.yml tags: k8s-post-upgrade diff --git a/roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2 b/roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2 new file mode 100644 index 000000000..887d022c1 --- /dev/null +++ b/roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: {{ kube_cert_dir }}/ca.pem + server: {{ kube_apiserver_endpoint }} +users: +- name: kube-controller-manager + user: + client-certificate: {{ kube_cert_dir }}/kube-controller-manager.pem + client-key: {{ kube_cert_dir }}/kube-controller-manager-key.pem +contexts: +- context: + cluster: local + user: kube-controller-manager + name: kube-controller-manager-{{ cluster_name }} +current-context: kube-controller-manager-{{ cluster_name }} diff --git a/roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2 b/roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2 new file mode 100644 index 000000000..974b72427 --- /dev/null +++ b/roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: {{ kube_cert_dir }}/ca.pem + server: {{ kube_apiserver_endpoint }} +users: +- name: kube-scheduler + user: + client-certificate: {{ kube_cert_dir }}/kube-scheduler.pem + client-key: {{ kube_cert_dir }}/kube-scheduler-key.pem +contexts: +- context: + cluster: local + user: kube-scheduler + name: kube-scheduler-{{ cluster_name }} +current-context: kube-scheduler-{{ cluster_name }} diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index d3f8a23a5..f65bb004c 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -24,7 +24,7 @@ spec: command: - /hyperkube - controller-manager - - --master={{ kube_apiserver_endpoint }} + - --kubeconfig={{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml - --leader-elect=true - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem - --root-ca-file={{ kube_cert_dir }}/ca.pem @@ -61,20 +61,36 @@ spec: initialDelaySeconds: 30 timeoutSeconds: 10 volumeMounts: - - mountPath: {{ kube_cert_dir }} - name: ssl-certs-kubernetes + - mountPath: /etc/ssl/certs + name: ssl-certs-host + readOnly: true + - mountPath: "{{kube_config_dir}}/ssl" + name: etc-kube-ssl + readOnly: true + - mountPath: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml" + name: kubeconfig readOnly: true {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere" ] %} - - mountPath: {{ kube_config_dir }}/cloud_config + - mountPath: "{{ kube_config_dir }}/cloud_config" name: cloudconfig readOnly: true {% endif %} volumes: - - hostPath: - path: {{ kube_cert_dir }} - name: ssl-certs-kubernetes + - name: ssl-certs-host + hostPath: +{% if ansible_os_family == 'RedHat' %} + path: /etc/pki/tls +{% else %} + path: /usr/share/ca-certificates +{% endif %} + - name: etc-kube-ssl + hostPath: + path: "{{ kube_config_dir }}/ssl" + - name: kubeconfig + hostPath: + path: "{{ 
kube_config_dir }}/kube-controller-manager-kubeconfig.yaml" {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} - hostPath: - path: {{ kube_config_dir }}/cloud_config + path: "{{ kube_config_dir }}/cloud_config" name: cloudconfig {% endif %} diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 index 441f991eb..1508e60cf 100644 --- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 @@ -25,7 +25,7 @@ spec: - /hyperkube - scheduler - --leader-elect=true - - --master={{ kube_apiserver_endpoint }} + - --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml - --v={{ kube_log_level }} {% if scheduler_custom_flags is string %} - {{ scheduler_custom_flags }} @@ -41,3 +41,27 @@ spec: port: 10251 initialDelaySeconds: 30 timeoutSeconds: 10 + volumeMounts: + - mountPath: /etc/ssl/certs + name: ssl-certs-host + readOnly: true + - mountPath: "{{ kube_config_dir }}/ssl" + name: etc-kube-ssl + readOnly: true + - mountPath: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml" + name: kubeconfig + readOnly: true + volumes: + - name: ssl-certs-host + hostPath: +{% if ansible_os_family == 'RedHat' %} + path: /etc/pki/tls +{% else %} + path: /usr/share/ca-certificates +{% endif %} + - name: etc-kube-ssl + hostPath: + path: "{{ kube_config_dir }}/ssl" + - name: kubeconfig + hostPath: + path: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml" diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index d584bdd7d..bbb13bc9d 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -41,14 +41,14 @@ spec: - mountPath: /etc/ssl/certs name: ssl-certs-host readOnly: true - - mountPath: {{kube_config_dir}}/kube-proxy-kubeconfig.yaml - name: "kubeconfig" + - mountPath: "{{ kube_config_dir }}/ssl" + name: etc-kube-ssl readOnly: true - - mountPath: {{kube_config_dir}}/ssl - name: "etc-kube-ssl" + - mountPath: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml" + name: kubeconfig readOnly: true - mountPath: /var/run/dbus - name: "var-run-dbus" + name: var-run-dbus readOnly: false volumes: - name: ssl-certs-host @@ -58,12 +58,12 @@ spec: {% else %} path: /usr/share/ca-certificates {% endif %} - - name: "kubeconfig" + - name: etc-kube-ssl hostPath: - path: "{{kube_config_dir}}/kube-proxy-kubeconfig.yaml" - - name: "etc-kube-ssl" + path: "{{ kube_config_dir }}/ssl" + - name: kubeconfig hostPath: - path: "{{kube_config_dir}}/ssl" - - name: "var-run-dbus" + path: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml" + - name: var-run-dbus hostPath: - path: "/var/run/dbus" + path: /var/run/dbus diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh index dde5873fb..5383e7adb 100755 --- a/roles/kubernetes/secrets/files/make-ssl.sh +++ b/roles/kubernetes/secrets/files/make-ssl.sh @@ -72,22 +72,30 @@ else openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1 fi +gen_key_and_cert() { + local name=$1 + local subject=$2 + openssl genrsa -out ${name}-key.pem 2048 > /dev/null 2>&1 + openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1 + openssl x509 -req 
-in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 3650 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1 +} + if [ ! -e "$SSLDIR/ca-key.pem" ]; then - # kube-apiserver key - openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1 - openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1 - openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 3650 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1 + # kube-apiserver + gen_key_and_cert "apiserver" "/CN=kube-apiserver" cat ca.pem >> apiserver.pem + # kube-scheduler + gen_key_and_cert "kube-scheduler" "/CN=system:kube-scheduler" + # kube-controller-manager + gen_key_and_cert "kube-controller-manager" "/CN=system:kube-controller-manager" fi # Admins if [ -n "$MASTERS" ]; then for host in $MASTERS; do cn="${host%%.*}" - # admin key - openssl genrsa -out admin-${host}-key.pem 2048 > /dev/null 2>&1 - openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=kube-admin-${cn}/O=system:masters" > /dev/null 2>&1 - openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 3650 > /dev/null 2>&1 + # admin + gen_key_and_cert "admin-${host}" "/CN=kube-admin-${cn}/O=system:masters" done fi @@ -95,10 +103,7 @@ fi if [ -n "$HOSTS" ]; then for host in $HOSTS; do cn="${host%%.*}" - # node key - openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1 - openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=kube-node-${cn}/O=system:nodes" > /dev/null 2>&1 - openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 3650 > /dev/null 2>&1 + gen_key_and_cert "node-${host}" "/CN=kube-node-${cn}/O=system:nodes" done fi @@ -106,13 +111,10 @@ fi if [ -n "$HOSTS" ]; then for host in $HOSTS; do cn="${host%%.*}" - # kube-proxy key - openssl genrsa -out kube-proxy-${host}-key.pem 2048 > /dev/null 2>&1 - openssl req -new -key kube-proxy-${host}-key.pem -out kube-proxy-${host}.csr -subj "/CN=system:kube-proxy" > /dev/null 2>&1 - openssl x509 -req -in kube-proxy-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out kube-proxy-${host}.pem -days 3650 > /dev/null 2>&1 + # kube-proxy + gen_key_and_cert "kube-proxy-${host}" "/CN=system:kube-proxy" done fi - # Install certs mv *.pem ${SSLDIR}/ diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index 1920b696b..61d9c7826 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -56,17 +56,25 @@ - set_fact: all_master_certs: "['ca-key.pem', + 'apiserver.pem', + 'apiserver-key.pem', + 'kube-scheduler.pem', + 'kube-scheduler-key.pem', + 'kube-controller-manager.pem', + 'kube-controller-manager-key.pem', {% for node in groups['kube-master'] %} 'admin-{{ node }}.pem', 'admin-{{ node }}-key.pem', - 'apiserver.pem', - 'apiserver-key.pem', {% endfor %}]" my_master_certs: ['ca-key.pem', 'admin-{{ inventory_hostname }}.pem', 'admin-{{ inventory_hostname }}-key.pem', 'apiserver.pem', - 'apiserver-key.pem' + 'apiserver-key.pem', + 'kube-scheduler.pem', + 'kube-scheduler-key.pem', + 'kube-controller-manager.pem', + 'kube-controller-manager-key.pem', ] all_node_certs: "['ca.pem', {% for node in groups['k8s-cluster'] %} From 765a5ce1ab5189279d71b8c3e356e1618ad8849e Mon Sep 17 00:00:00 2001 
From: jwfang <54740235@qq.com> Date: Fri, 16 Jun 2017 17:15:37 +0800 Subject: [PATCH 04/22] node identified as system:node: --- roles/kubernetes/secrets/files/make-ssl.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh index 5383e7adb..e8574cc6b 100755 --- a/roles/kubernetes/secrets/files/make-ssl.sh +++ b/roles/kubernetes/secrets/files/make-ssl.sh @@ -103,7 +103,7 @@ fi if [ -n "$HOSTS" ]; then for host in $HOSTS; do cn="${host%%.*}" - gen_key_and_cert "node-${host}" "/CN=kube-node-${cn}/O=system:nodes" + gen_key_and_cert "node-${host}" "/CN=system:node:${cn}/O=system:nodes" done fi From acbdfb08ce9b41b56199f3211fb676f374321aeb Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Fri, 16 Jun 2017 18:54:18 +0800 Subject: [PATCH 05/22] run kubedns as system:serviceaccount:kube-system:kube-dns; but dns does NOT work --- roles/kubernetes-apps/ansible/tasks/main.yml | 1 + roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml | 1 + .../ansible/templates/kubedns-serviceaccount.yml | 5 +++++ 3 files changed, 7 insertions(+) create mode 100644 roles/kubernetes-apps/ansible/templates/kubedns-serviceaccount.yml diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index ed0d11f28..5225bbda3 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -13,6 +13,7 @@ src: "{{item.file}}" dest: "{{kube_config_dir}}/{{item.file}}" with_items: + - {name: kube-dns, file: kubedns-serviceaccount.yml, type: serviceaccount} - {name: kubedns, file: kubedns-deploy.yml, type: deployment} - {name: kubedns, file: kubedns-svc.yml, type: svc} - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment} diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml index a2150cc70..4c7a7eec7 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml @@ -114,3 +114,4 @@ spec: - containerPort: 8080 protocol: TCP dnsPolicy: Default # Don't use cluster DNS. 
+ serviceAccountName: kube-dns diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-serviceaccount.yml b/roles/kubernetes-apps/ansible/templates/kubedns-serviceaccount.yml new file mode 100644 index 000000000..8cf41ae23 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/kubedns-serviceaccount.yml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-dns + namespace: {{ system_namespace }} From 7c2816ba738887988c476f1a7bb4c3c95659c63c Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Fri, 16 Jun 2017 20:08:19 +0800 Subject: [PATCH 06/22] add label for kube-dns sa --- .../templates/{kubedns-serviceaccount.yml => kubedns-sa.yml} | 2 ++ 1 file changed, 2 insertions(+) rename roles/kubernetes-apps/ansible/templates/{kubedns-serviceaccount.yml => kubedns-sa.yml} (65%) diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-serviceaccount.yml b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml similarity index 65% rename from roles/kubernetes-apps/ansible/templates/kubedns-serviceaccount.yml rename to roles/kubernetes-apps/ansible/templates/kubedns-sa.yml index 8cf41ae23..e520ccbfc 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-serviceaccount.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml @@ -3,3 +3,5 @@ kind: ServiceAccount metadata: name: kube-dns namespace: {{ system_namespace }} + labels: + kubernetes.io/cluster-service: "true" From 0dc38ff9b386057d67aa11e0239894270de6dd22 Mon Sep 17 00:00:00 2001 From: Raj Perera Date: Fri, 16 Jun 2017 10:28:23 -0400 Subject: [PATCH 07/22] Basic RBAC functionality. (Based from work done by @jwfang (#1351)) * Add a flag "authorization_method", when set to "RBAC" enables role based access control. * Add required cluster roles and bindings for kube-dns * Patch tiller deployment to use a service account with proper credentials. * Add a flag to regenerate kubernetes certs on the nodes. 
--- docs/vars.md | 7 +++- .../kubernetes-apps/ansible/defaults/main.yml | 8 +++++ roles/kubernetes-apps/ansible/tasks/main.yml | 8 ++++- .../kubedns-autoscaler-clusterrole.yml | 32 +++++++++++++++++++ .../kubedns-autoscaler-clusterrolebinding.yml | 27 ++++++++++++++++ .../templates/kubedns-autoscaler-sa.yml | 19 +++++++++++ .../ansible/templates/kubedns-autoscaler.yml | 3 +- .../ansible/templates/kubedns-deploy.yml | 4 +++ .../ansible/templates/kubedns-sa.yml | 4 +++ roles/kubernetes-apps/helm/defaults/main.yml | 2 ++ roles/kubernetes-apps/helm/tasks/main.yml | 18 +++++++++++ roles/kubernetes/master/defaults/main.yml | 4 ++- .../manifests/kube-apiserver.manifest.j2 | 3 ++ .../kube-controller-manager.manifest.j2 | 3 ++ roles/kubernetes/node/handlers/main.yml | 5 +++ roles/kubernetes/node/tasks/main.yml | 7 ++-- roles/kubernetes/node/tasks/pre_upgrade.yml | 4 +++ .../templates/kube-proxy-kubeconfig.yaml.j2 | 18 +++++++++++ .../manifests/kube-proxy.manifest.j2 | 6 ++-- roles/kubernetes/secrets/defaults/main.yml | 2 ++ roles/kubernetes/secrets/files/make-ssl.sh | 17 ++++++++-- .../kubernetes/secrets/tasks/check-certs.yml | 10 +++--- .../secrets/tasks/gen_certs_script.yml | 11 +++++-- 23 files changed, 203 insertions(+), 19 deletions(-) create mode 100644 roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml create mode 100644 roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml create mode 100644 roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml create mode 100644 roles/kubernetes-apps/ansible/templates/kubedns-sa.yml create mode 100644 roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 diff --git a/docs/vars.md b/docs/vars.md index 603a614b2..b3aee535a 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -67,7 +67,11 @@ following default cluster paramters: OpenStack (default is unset) * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in Kubernetes - +* *authorization_mode* - Set this to "RBAC" (upper-case, no quotes) +[to enable Role Based Access Control](https://kubernetes.io/docs/admin/authorization/rbac/) +* *rotate_kubernetes_certs* - Set this to true to regenerate kubernetes Node certificates. *Warning: Will overwrite old certs.* + + Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances' private addresses, make sure to pick another values for ``kube_service_addresses`` and ``kube_pods_subnet``, for example from the ``172.18.0.0/16``. @@ -116,3 +120,4 @@ The possible vars are: Kargo sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their passwords default to changeme. You can set this by changing ``kube_api_pwd``. 
+ diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index 89bdd4277..a5329c635 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -40,3 +40,11 @@ netchecker_server_memory_requests: 64M # SSL etcd_cert_dir: "/etc/ssl/etcd/ssl" canal_cert_dir: "/etc/canal/certs" + +# RBAC +rbac_resources: + - clusterrole, + - clusterrolebinding, + - sa + +rbac_enabled: "{{ authorization_mode == 'RBAC' }}" \ No newline at end of file diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index ed0d11f28..2c4b14ad8 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -13,11 +13,15 @@ src: "{{item.file}}" dest: "{{kube_config_dir}}/{{item.file}}" with_items: + - {name: kubedns, file: kubedns-sa.yml, type: sa} - {name: kubedns, file: kubedns-deploy.yml, type: deployment} - {name: kubedns, file: kubedns-svc.yml, type: svc} + - {name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa} + - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole} + - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding} - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment} register: manifests - when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] + when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and (item.type not in rbac_resources or rbac_enabled) tags: dnsmasq - name: Kubernetes Apps | Start Resources @@ -29,6 +33,7 @@ filename: "{{kube_config_dir}}/{{item.item.file}}" state: "{{item.changed | ternary('latest','present') }}" with_items: "{{ manifests.results }}" + failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] tags: dnsmasq @@ -36,3 +41,4 @@ include: tasks/netchecker.yml when: deploy_netchecker tags: netchecker + diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml new file mode 100644 index 000000000..b3324abf2 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml @@ -0,0 +1,32 @@ +# Copyright 2016 The Kubernetes Authors. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: cluster-proportional-autoscaler + namespace: kube-system +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list"] + - apiGroups: [""] + resources: ["replicationcontrollers/scale"] + verbs: ["get", "update"] + - apiGroups: ["extensions"] + resources: ["deployments/scale", "replicasets/scale"] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml new file mode 100644 index 000000000..c4cdda9f1 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml @@ -0,0 +1,27 @@ +# Copyright 2016 The Kubernetes Authors. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: cluster-proportional-autoscaler + namespace: kube-system +subjects: + - kind: ServiceAccount + name: cluster-proportional-autoscaler + namespace: kube-system +roleRef: + kind: ClusterRole + name: cluster-proportional-autoscaler + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml new file mode 100644 index 000000000..521f12ad9 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml @@ -0,0 +1,19 @@ +# Copyright 2016 The Kubernetes Authors. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: cluster-proportional-autoscaler + namespace: kube-system diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml index c0f519e2c..829df512e 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml @@ -46,4 +46,5 @@ spec: - --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}} - --logtostderr=true - --v=2 - + serviceAccountName: cluster-proportional-autoscaler + serviceAccount: cluster-proportional-autoscaler diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml index a2150cc70..1bd9fcc51 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml @@ -114,3 +114,7 @@ spec: - containerPort: 8080 protocol: TCP dnsPolicy: Default # Don't use cluster DNS. +{% if authorization_mode is defined and authorization_mode == "RBAC" %} + serviceAccount: kube-dns + serviceAccountName: kube-dns +{% endif %} \ No newline at end of file diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml new file mode 100644 index 000000000..d62dc6b16 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-dns \ No newline at end of file diff --git a/roles/kubernetes-apps/helm/defaults/main.yml b/roles/kubernetes-apps/helm/defaults/main.yml index b1b2dfca9..5cb600439 100644 --- a/roles/kubernetes-apps/helm/defaults/main.yml +++ b/roles/kubernetes-apps/helm/defaults/main.yml @@ -2,3 +2,5 @@ helm_enabled: false # specify a dir and attach it to helm for HELM_HOME. 
helm_home_dir: "/root/.helm" + +rbac_enabled: "{{ authorization_mode == 'RBAC' }}" \ No newline at end of file diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index 1d50f8b9b..4443015ec 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -10,10 +10,28 @@ mode: 0755 register: helm_container +- name: Helm | Configure tiller service account for RBAC + command: kubectl create serviceaccount tiller --namespace=kube-system + ignore_errors: yes + when: rbac_enabled + +- name: Helm | Configure tiller rolebindings for RBAC + command: kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller + ignore_errors: yes + when: rbac_enabled + - name: Helm | Install/upgrade helm command: "{{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }}" when: helm_container.changed +- name: Helm | Patch tiller deployment for RBAC + shell: > + kubectl --namespace=kube-system get deployment tiller-deploy -o json | \ + python -c 'import sys,json;a=json.load(sys.stdin);a["spec"]["template"]["spec"]["serviceAccount"]="tiller";json.dump(a,sys.stdout)' | \ + kubectl apply -f - + when: rbac_enabled + - name: Helm | Set up bash completion shell: "umask 022 && {{ bin_dir }}/helm completion >/etc/bash_completion.d/helm.sh" when: ( helm_container.changed and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] ) + diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 785ef43af..96901e235 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -64,4 +64,6 @@ apiserver_custom_flags: [] controller_mgr_custom_flags: [] -scheduler_custom_flags: [] \ No newline at end of file +scheduler_custom_flags: [] + +authorization_mode: RBAC \ No newline at end of file diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 851cca060..5a2101b73 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -81,6 +81,9 @@ spec: {% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %} - --anonymous-auth={{ kube_api_anonymous_auth }} {% endif %} +{% if authorization_mode %} + - --authorization-mode={{ authorization_mode }} +{% endif %} {% if apiserver_custom_flags is string %} - {{ apiserver_custom_flags }} {% else %} diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index d3f8a23a5..d8122eec2 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -35,6 +35,9 @@ spec: - --node-monitor-period={{ kube_controller_node_monitor_period }} - --pod-eviction-timeout={{ kube_controller_pod_eviction_timeout }} - --v={{ kube_log_level }} +{% if authorization_mode is defined and authorization_mode == "RBAC" %} + - --use-service-account-credentials +{% endif %} {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} - --cloud-provider={{cloud_provider}} - --cloud-config={{ kube_config_dir }}/cloud_config diff --git 
a/roles/kubernetes/node/handlers/main.yml b/roles/kubernetes/node/handlers/main.yml index 00525b995..079cbd3da 100644 --- a/roles/kubernetes/node/handlers/main.yml +++ b/roles/kubernetes/node/handlers/main.yml @@ -1,4 +1,9 @@ --- +- name: restart kubelet if secrets changed + command: /bin/true + when: secret_changed|d(False) + notify: restart kubelet + - name: restart kubelet command: /bin/true notify: diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index f09845f76..e0558f8cd 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -30,9 +30,12 @@ - name: write the kubecfg (auth) file for kubelet template: - src: node-kubeconfig.yaml.j2 - dest: "{{ kube_config_dir }}/node-kubeconfig.yaml" + src: "{{ item }}-kubeconfig.yaml.j2" + dest: "{{ kube_config_dir }}/{{ item }}-kubeconfig.yaml" backup: yes + with_items: + - node + - kube-proxy notify: restart kubelet tags: kubelet diff --git a/roles/kubernetes/node/tasks/pre_upgrade.yml b/roles/kubernetes/node/tasks/pre_upgrade.yml index 612dd3e6f..291817562 100644 --- a/roles/kubernetes/node/tasks/pre_upgrade.yml +++ b/roles/kubernetes/node/tasks/pre_upgrade.yml @@ -4,3 +4,7 @@ args: creates: "/var/lib/cni" failed_when: false + +- name: "Pre-upgrade | Make sure to restart kubelet if certificates changed" + command: /bin/true + notify: restart kubelet if secrets changed \ No newline at end of file diff --git a/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 b/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 new file mode 100644 index 000000000..18c47cd3e --- /dev/null +++ b/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: {{ kube_cert_dir }}/ca.pem + server: {{ kube_apiserver_endpoint }} +users: +- name: kube-proxy + user: + client-certificate: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}.pem + client-key: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}-key.pem +contexts: +- context: + cluster: local + user: kube-proxy + name: kube-proxy-{{ cluster_name }} +current-context: kube-proxy-{{ cluster_name }} diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index 9b7d53857..d584bdd7d 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -27,7 +27,7 @@ spec: - --v={{ kube_log_level }} - --master={{ kube_apiserver_endpoint }} {% if not is_kube_master %} - - --kubeconfig={{kube_config_dir}}/node-kubeconfig.yaml + - --kubeconfig={{kube_config_dir}}/kube-proxy-kubeconfig.yaml {% endif %} - --bind-address={{ ip | default(ansible_default_ipv4.address) }} - --cluster-cidr={{ kube_pods_subnet }} @@ -41,7 +41,7 @@ spec: - mountPath: /etc/ssl/certs name: ssl-certs-host readOnly: true - - mountPath: {{kube_config_dir}}/node-kubeconfig.yaml + - mountPath: {{kube_config_dir}}/kube-proxy-kubeconfig.yaml name: "kubeconfig" readOnly: true - mountPath: {{kube_config_dir}}/ssl @@ -60,7 +60,7 @@ spec: {% endif %} - name: "kubeconfig" hostPath: - path: "{{kube_config_dir}}/node-kubeconfig.yaml" + path: "{{kube_config_dir}}/kube-proxy-kubeconfig.yaml" - name: "etc-kube-ssl" hostPath: path: "{{kube_config_dir}}/ssl" diff --git a/roles/kubernetes/secrets/defaults/main.yml b/roles/kubernetes/secrets/defaults/main.yml index e6177857e..3b65c23a9 
100644 --- a/roles/kubernetes/secrets/defaults/main.yml +++ b/roles/kubernetes/secrets/defaults/main.yml @@ -1,2 +1,4 @@ --- kube_cert_group: kube-cert + +rotate_kubernetes_certs: false # set this to true to regenerate certificates diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh index 55ea13d1e..dde5873fb 100755 --- a/roles/kubernetes/secrets/files/make-ssl.sh +++ b/roles/kubernetes/secrets/files/make-ssl.sh @@ -80,6 +80,7 @@ if [ ! -e "$SSLDIR/ca-key.pem" ]; then cat ca.pem >> apiserver.pem fi +# Admins if [ -n "$MASTERS" ]; then for host in $MASTERS; do cn="${host%%.*}" @@ -90,16 +91,28 @@ if [ -n "$MASTERS" ]; then done fi -# Nodes and Admin +# Nodes if [ -n "$HOSTS" ]; then for host in $HOSTS; do cn="${host%%.*}" # node key openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1 - openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=kube-node-${cn}" > /dev/null 2>&1 + openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=kube-node-${cn}/O=system:nodes" > /dev/null 2>&1 openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 3650 > /dev/null 2>&1 done fi +# system:kube-proxy +if [ -n "$HOSTS" ]; then + for host in $HOSTS; do + cn="${host%%.*}" + # kube-proxy key + openssl genrsa -out kube-proxy-${host}-key.pem 2048 > /dev/null 2>&1 + openssl req -new -key kube-proxy-${host}-key.pem -out kube-proxy-${host}.csr -subj "/CN=system:kube-proxy" > /dev/null 2>&1 + openssl x509 -req -in kube-proxy-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out kube-proxy-${host}.pem -days 3650 > /dev/null 2>&1 + done +fi + + # Install certs mv *.pem ${SSLDIR}/ diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml index 061b04b04..fafc20c1e 100644 --- a/roles/kubernetes/secrets/tasks/check-certs.yml +++ b/roles/kubernetes/secrets/tasks/check-certs.yml @@ -10,8 +10,8 @@ - name: "Check_certs | Set default value for 'sync_certs', 'gen_certs', and 'secret_changed' to false" set_fact: - sync_certs: false - gen_certs: false + sync_certs: true + gen_certs: true secret_changed: false - name: "Check certs | check if a cert already exists on node" @@ -25,7 +25,7 @@ - name: "Check_certs | Set 'gen_certs' to true" set_fact: gen_certs: true - when: "not item in kubecert_master.files|map(attribute='path') | list" + when: "rotate_kubernetes_certs or item not in (kubecert_master.files|map(attribute='path')|list)" run_once: true with_items: >- ['{{ kube_cert_dir }}/ca.pem', @@ -41,7 +41,7 @@ {% set existing_certs = kubecert_master.files|map(attribute='path')|list|sort %} {% for host in groups['k8s-cluster'] -%} {% set host_cert = "%s/node-%s-key.pem"|format(kube_cert_dir, host) %} - {% if host_cert in existing_certs -%} + {% if host_cert in existing_certs and not rotate_kubernetes_certs -%} "{{ host }}": False, {% else -%} "{{ host }}": True, @@ -62,5 +62,5 @@ (kubecert_node.results[1].stat.checksum|default('') != kubecert_master.files|selectattr("path", "equalto", kubecert_node.results[1].stat.path)|map(attribute="checksum")|first|default('')) -%} {%- set _ = certs.update({'sync': True}) -%} {% endif %} - {{ certs.sync }} + {{ rotate_kubernetes_certs or certs.sync }} diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index 8df2195bf..09270beb4 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ 
b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -72,8 +72,15 @@ {% for node in groups['k8s-cluster'] %} 'node-{{ node }}.pem', 'node-{{ node }}-key.pem', + 'kube-proxy-{{ node }}.pem', + 'kube-proxy-{{ node }}-key.pem', {% endfor %}]" - my_node_certs: ['ca.pem', 'node-{{ inventory_hostname }}.pem', 'node-{{ inventory_hostname }}-key.pem'] + my_node_certs: ['ca.pem', + 'node-{{ inventory_hostname }}.pem', + 'node-{{ inventory_hostname }}-key.pem', + 'kube-proxy-{{ inventory_hostname }}.pem', + 'kube-proxy-{{ inventory_hostname }}-key.pem', + ] tags: facts - name: Gen_certs | Gather master certs @@ -121,7 +128,6 @@ - name: Gen_certs | Unpack certs on masters shell: "base64 -d < {{ cert_tempfile.stdout }} | tar xz -C {{ kube_cert_dir }}" no_log: true - changed_when: false check_mode: no when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and inventory_hostname != groups['kube-master'][0] @@ -139,7 +145,6 @@ args: executable: /bin/bash no_log: true - changed_when: false check_mode: no when: inventory_hostname in groups['kube-node'] and sync_certs|default(false) and From 9924a33d6f28c15c50f5d796b9ad3be402fe2d89 Mon Sep 17 00:00:00 2001 From: Raj Perera Date: Fri, 16 Jun 2017 11:21:59 -0400 Subject: [PATCH 08/22] Replace static references to system namespace --- .../ansible/templates/kubedns-autoscaler-clusterrole.yml | 2 +- .../templates/kubedns-autoscaler-clusterrolebinding.yml | 4 ++-- .../ansible/templates/kubedns-autoscaler-sa.yml | 2 +- .../ansible/templates/kubedns-autoscaler.yml | 4 ++-- roles/kubernetes-apps/helm/tasks/main.yml | 8 ++++---- .../master/templates/manifests/kube-scheduler.manifest.j2 | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml index b3324abf2..a194426c6 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml @@ -16,7 +16,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: cluster-proportional-autoscaler - namespace: kube-system + namespace: {{ system_namespace }} rules: - apiGroups: [""] resources: ["nodes"] diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml index c4cdda9f1..1bdb2a715 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml @@ -16,11 +16,11 @@ kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: cluster-proportional-autoscaler - namespace: kube-system + namespace: {{ system_namespace }} subjects: - kind: ServiceAccount name: cluster-proportional-autoscaler - namespace: kube-system + namespace: {{ system_namespace }} roleRef: kind: ClusterRole name: cluster-proportional-autoscaler diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml index 521f12ad9..9544a7dd9 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml @@ -16,4 +16,4 @@ kind: ServiceAccount apiVersion: v1 metadata: name: cluster-proportional-autoscaler - namespace: 
kube-system + namespace: {{ system_namespace }} diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml index 829df512e..28a67af4f 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml @@ -16,7 +16,7 @@ apiVersion: extensions/v1beta1 kind: Deployment metadata: name: kubedns-autoscaler - namespace: kube-system + namespace: {{ system_namespace }} labels: k8s-app: kubedns-autoscaler kubernetes.io/cluster-service: "true" @@ -39,7 +39,7 @@ spec: memory: "10Mi" command: - /cluster-proportional-autoscaler - - --namespace=kube-system + - --namespace={{ system_namespace }} - --configmap=kubedns-autoscaler # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base - --target=Deployment/kubedns diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index 4443015ec..8c8123eb0 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -11,12 +11,12 @@ register: helm_container - name: Helm | Configure tiller service account for RBAC - command: kubectl create serviceaccount tiller --namespace=kube-system + command: kubectl create serviceaccount tiller --namespace={{ system_namespace }} ignore_errors: yes when: rbac_enabled - name: Helm | Configure tiller rolebindings for RBAC - command: kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller + command: kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount={{ system_namespace }}:tiller ignore_errors: yes when: rbac_enabled @@ -26,9 +26,9 @@ - name: Helm | Patch tiller deployment for RBAC shell: > - kubectl --namespace=kube-system get deployment tiller-deploy -o json | \ + kubectl --namespace={{ system_namespace }} get deployment tiller-deploy -o json | \ python -c 'import sys,json;a=json.load(sys.stdin);a["spec"]["template"]["spec"]["serviceAccount"]="tiller";json.dump(a,sys.stdout)' | \ - kubectl apply -f - + kubectl apply -n {{ system_namespace }} -f - when: rbac_enabled - name: Helm | Set up bash completion diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 index 1508e60cf..fdc16bf7f 100644 --- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-scheduler - namespace: kube-system + namespace: {{ system_namespace }} labels: k8s-app: kube-scheduler spec: From b800f7bb074aac3afa3d7e770e8bf8a9d6ab5b1b Mon Sep 17 00:00:00 2001 From: Raj Perera Date: Fri, 16 Jun 2017 12:29:13 -0400 Subject: [PATCH 09/22] Use kubectl patch --- roles/kubernetes-apps/helm/tasks/main.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index 8c8123eb0..aaa1d3c5d 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -25,10 +25,7 @@ when: helm_container.changed - name: Helm | Patch tiller deployment for RBAC - shell: > - kubectl --namespace={{ system_namespace }} get deployment tiller-deploy -o json | \ - python -c 'import 
sys,json;a=json.load(sys.stdin);a["spec"]["template"]["spec"]["serviceAccount"]="tiller";json.dump(a,sys.stdout)' | \ - kubectl apply -n {{ system_namespace }} -f - + command: kubectl patch deployment tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}' -n {{ system_namespace }} when: rbac_enabled - name: Helm | Set up bash completion From 20cacc09bab2ee78ce17a9e5025ebc309b172c88 Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Sat, 17 Jun 2017 16:22:58 +0800 Subject: [PATCH 10/22] fix rename --- roles/kubernetes-apps/ansible/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 5225bbda3..151cc515c 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -13,7 +13,7 @@ src: "{{item.file}}" dest: "{{kube_config_dir}}/{{item.file}}" with_items: - - {name: kube-dns, file: kubedns-serviceaccount.yml, type: serviceaccount} + - {name: kube-dns, file: kubedns-sa.yml, type: serviceaccount} - {name: kubedns, file: kubedns-deploy.yml, type: deployment} - {name: kubedns, file: kubedns-svc.yml, type: svc} - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment} From 36e3aae6153ca75f488accd2a1c64bbc680c41ce Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Sat, 17 Jun 2017 19:53:29 +0800 Subject: [PATCH 11/22] patch system:kube-dns clusterrole for get --- roles/kubernetes-apps/ansible/tasks/main.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 151cc515c..f65a64029 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -21,6 +21,23 @@ when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] tags: dnsmasq +# see https://github.com/kubernetes/kubernetes/issues/45084 +# TODO: this is only needed for "old" kube-dns +- name: Kubernetes Apps | Patch system:kube-dns ClusterRole + command: > + {{bin_dir}}/kubectl patch clusterrole system:kube-dns + --patch='{ + "rules": [ + { + "apiGroups" : [""], + "resources" : ["endpoints", "services"], + "verbs": ["list", "watch", "get"] + } + ] + }' + when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] + tags: dnsmasq + - name: Kubernetes Apps | Start Resources kube: name: "{{item.item.name}}" From c4fbf41220adcfe8022b6acbbf02eea1fc8c0eea Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Mon, 19 Jun 2017 12:48:07 +0800 Subject: [PATCH 12/22] replace insecure port with secure port for apiserver_endpoint on kube-masters --- roles/kubernetes/preinstall/tasks/set_facts.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/preinstall/tasks/set_facts.yml b/roles/kubernetes/preinstall/tasks/set_facts.yml index 03057829d..edfac2e2e 100644 --- a/roles/kubernetes/preinstall/tasks/set_facts.yml +++ b/roles/kubernetes/preinstall/tasks/set_facts.yml @@ -23,7 +23,7 @@ {% if not is_kube_master and loadbalancer_apiserver_localhost|default(false) -%} https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }} {%- elif is_kube_master -%} - http://127.0.0.1:{{ kube_apiserver_insecure_port }} + https://127.0.0.1:{{ kube_apiserver_port }} {%- else -%} {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%} https://{{ 
apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }} From e663c6b61aa0fe934819459a28b4eabbcd4bba6e Mon Sep 17 00:00:00 2001 From: Raj Perera Date: Mon, 19 Jun 2017 10:24:56 -0400 Subject: [PATCH 13/22] Address PR feedback. * Consolidate variable definitions to `kargo-defaults`. * Set `AlwaysAllow` as the default authorization mode. * Ability to set multiple authorization modes. * Various style fixes and typos --- roles/kargo-defaults/defaults/main.yaml | 6 ++++++ roles/kubernetes-apps/ansible/defaults/main.yml | 7 +++---- roles/kubernetes-apps/ansible/tasks/main.yml | 2 +- .../templates/kubedns-autoscaler-clusterrolebinding.yml | 2 +- roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml | 5 ++--- roles/kubernetes-apps/helm/defaults/main.yml | 2 -- roles/kubernetes/master/defaults/main.yml | 1 - .../master/templates/manifests/kube-apiserver.manifest.j2 | 4 ++-- .../manifests/kube-controller-manager.manifest.j2 | 2 +- roles/kubernetes/secrets/tasks/check-certs.yml | 4 ++-- 10 files changed, 18 insertions(+), 17 deletions(-) diff --git a/roles/kargo-defaults/defaults/main.yaml b/roles/kargo-defaults/defaults/main.yaml index f0323d479..9a7368d29 100644 --- a/roles/kargo-defaults/defaults/main.yaml +++ b/roles/kargo-defaults/defaults/main.yaml @@ -114,3 +114,9 @@ vault_deployment_type: docker k8s_image_pull_policy: IfNotPresent efk_enabled: false enable_network_policy: false + +## List of authorization plugins that must be configured for +## the k8s cluster. Only 'AlwaysAllow' and 'RBAC' is supported +## at the moment. +authorization_mode: ['AlwaysAllow'] +rbac_enabled: "{{ 'RBAC' in authorization_mode }}" diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index a5329c635..13f8e41a2 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -41,10 +41,9 @@ netchecker_server_memory_requests: 64M etcd_cert_dir: "/etc/ssl/etcd/ssl" canal_cert_dir: "/etc/canal/certs" -# RBAC -rbac_resources: +# RBAC specific resources that will be ignored when RBAC is not enabled. 
+apiserver_rbac_resources: - clusterrole, - clusterrolebinding, - sa - -rbac_enabled: "{{ authorization_mode == 'RBAC' }}" \ No newline at end of file + - serviceaccount diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 1e373caa3..626e30620 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -21,7 +21,7 @@ - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding} - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment} register: manifests - when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and (item.type not in rbac_resources or rbac_enabled) + when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and (item.type not in apiserver_rbac_resources or rbac_enabled) tags: dnsmasq - name: Kubernetes Apps | Start Resources diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml index 1bdb2a715..a368ae333 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml @@ -24,4 +24,4 @@ subjects: roleRef: kind: ClusterRole name: cluster-proportional-autoscaler - apiGroup: rbac.authorization.k8s.io \ No newline at end of file + apiGroup: rbac.authorization.k8s.io diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml index 1bd9fcc51..6620c1642 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml @@ -114,7 +114,6 @@ spec: - containerPort: 8080 protocol: TCP dnsPolicy: Default # Don't use cluster DNS. -{% if authorization_mode is defined and authorization_mode == "RBAC" %} - serviceAccount: kube-dns +{% if rbac_enabled %} serviceAccountName: kube-dns -{% endif %} \ No newline at end of file +{% endif %} diff --git a/roles/kubernetes-apps/helm/defaults/main.yml b/roles/kubernetes-apps/helm/defaults/main.yml index 5cb600439..b1b2dfca9 100644 --- a/roles/kubernetes-apps/helm/defaults/main.yml +++ b/roles/kubernetes-apps/helm/defaults/main.yml @@ -2,5 +2,3 @@ helm_enabled: false # specify a dir and attach it to helm for HELM_HOME. 
helm_home_dir: "/root/.helm" - -rbac_enabled: "{{ authorization_mode == 'RBAC' }}" \ No newline at end of file diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 96901e235..0536432e5 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -66,4 +66,3 @@ controller_mgr_custom_flags: [] scheduler_custom_flags: [] -authorization_mode: RBAC \ No newline at end of file diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 5a2101b73..fddd66a27 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -82,7 +82,7 @@ spec: - --anonymous-auth={{ kube_api_anonymous_auth }} {% endif %} {% if authorization_mode %} - - --authorization-mode={{ authorization_mode }} + - --authorization-mode={{ authorization_mode|join(',') }} {% endif %} {% if apiserver_custom_flags is string %} - {{ apiserver_custom_flags }} @@ -127,4 +127,4 @@ spec: - hostPath: path: /etc/ssl/certs/ca-bundle.crt name: rhel-ca-bundle -{% endif %} \ No newline at end of file +{% endif %} diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index a5171f82f..a6b69fa14 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -35,7 +35,7 @@ spec: - --node-monitor-period={{ kube_controller_node_monitor_period }} - --pod-eviction-timeout={{ kube_controller_pod_eviction_timeout }} - --v={{ kube_log_level }} -{% if authorization_mode is defined and authorization_mode == "RBAC" %} +{% if rbac_enabled %} - --use-service-account-credentials {% endif %} {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml index fafc20c1e..73704caa4 100644 --- a/roles/kubernetes/secrets/tasks/check-certs.yml +++ b/roles/kubernetes/secrets/tasks/check-certs.yml @@ -10,8 +10,8 @@ - name: "Check_certs | Set default value for 'sync_certs', 'gen_certs', and 'secret_changed' to false" set_fact: - sync_certs: true - gen_certs: true + sync_certs: false + gen_certs: false secret_changed: false - name: "Check certs | check if a cert already exists on node" From 839b7d4a0f234affd719cc5dd8c2f6d05045bbd1 Mon Sep 17 00:00:00 2001 From: Raj Perera Date: Mon, 19 Jun 2017 10:29:03 -0400 Subject: [PATCH 14/22] Update docs. --- docs/vars.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/vars.md b/docs/vars.md index b3aee535a..f51f71808 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -67,9 +67,11 @@ following default cluster paramters: OpenStack (default is unset) * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in Kubernetes -* *authorization_mode* - Set this to "RBAC" (upper-case, no quotes) -[to enable Role Based Access Control](https://kubernetes.io/docs/admin/authorization/rbac/) -* *rotate_kubernetes_certs* - Set this to true to regenerate kubernetes Node certificates. *Warning: Will overwrite old certs.* +* *authorization_mode* - A list of authorization modes that the apiserver should be configured. 
+Supported values are `['AlwaysAllow', 'RBAC']` (Default: `['AlwaysAllow']`) +* *rotate_kubernetes_certs* - Set this to true to regenerate kubernetes node and master certificates. +Useful if the authorization mode was changed and certificate format +needs to be updated. This will not regenerate the root CA. *(!!Warning!!: Will overwrite old certs.)* Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances' From eb91eab39a0abf8b2cb90c39910b759addde1e82 Mon Sep 17 00:00:00 2001 From: Raj Perera Date: Mon, 19 Jun 2017 11:00:26 -0400 Subject: [PATCH 15/22] Extract kubectl commands to resource yaml files and use kube module --- roles/kargo-defaults/defaults/main.yaml | 11 +++++++ roles/kubernetes-apps/helm/tasks/main.yml | 30 +++++++++++++------ .../manifests/tiller-clusterrolebinding.yml | 13 ++++++++ .../helm/templates/manifests/tiller-sa.yml | 7 +++++ roles/kubernetes/node/tasks/install.yml | 16 +--------- 5 files changed, 53 insertions(+), 24 deletions(-) create mode 100644 roles/kubernetes-apps/helm/templates/manifests/tiller-clusterrolebinding.yml create mode 100644 roles/kubernetes-apps/helm/templates/manifests/tiller-sa.yml diff --git a/roles/kargo-defaults/defaults/main.yaml b/roles/kargo-defaults/defaults/main.yaml index 9a7368d29..91602fdaa 100644 --- a/roles/kargo-defaults/defaults/main.yaml +++ b/roles/kargo-defaults/defaults/main.yaml @@ -120,3 +120,14 @@ enable_network_policy: false ## at the moment. authorization_mode: ['AlwaysAllow'] rbac_enabled: "{{ 'RBAC' in authorization_mode }}" + +ssl_ca_dirs: "[ + {% if ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] -%} + '/usr/share/ca-certificates', + {% elif ansible_os_family == 'RedHat' -%} + '/etc/pki/tls', + '/etc/pki/ca-trust', + {% elif ansible_os_family == 'Debian' -%} + '/usr/share/ca-certificates', + {% endif -%} + ]" diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index aaa1d3c5d..a161b171b 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -10,19 +10,31 @@ mode: 0755 register: helm_container -- name: Helm | Configure tiller service account for RBAC - command: kubectl create serviceaccount tiller --namespace={{ system_namespace }} - ignore_errors: yes - when: rbac_enabled +- name: Helm | Lay Down Helm Manifests (RBAC) + template: + src: "manifests/{{item.file}}" + dest: "{{kube_config_dir}}/{{item.file}}" + with_items: + - {name: tiller, file: tiller-sa.yml, type: sa} + - {name: tiller, file: tiller-clusterrolebinding.yml, type: clusterrolebinding} + register: manifests + when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled -- name: Helm | Configure tiller rolebindings for RBAC - command: kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount={{ system_namespace }}:tiller - ignore_errors: yes - when: rbac_enabled +- name: Helm | Apply Helm Manifests (RBAC) + kube: + name: "{{item.item.name}}" + namespace: "{{ system_namespace }}" + kubectl: "{{bin_dir}}/kubectl" + resource: "{{item.item.type}}" + filename: "{{kube_config_dir}}/{{item.item.file}}" + state: "{{item.changed | ternary('latest','present') }}" + with_items: "{{ manifests.results }}" + failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg + when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled - name: Helm | Install/upgrade helm command: "{{ bin_dir }}/helm init --upgrade 
--tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }}" - when: helm_container.changed + when: helm_container.changed or manifests|changed - name: Helm | Patch tiller deployment for RBAC command: kubectl patch deployment tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}' -n {{ system_namespace }} diff --git a/roles/kubernetes-apps/helm/templates/manifests/tiller-clusterrolebinding.yml b/roles/kubernetes-apps/helm/templates/manifests/tiller-clusterrolebinding.yml new file mode 100644 index 000000000..0ac9341ee --- /dev/null +++ b/roles/kubernetes-apps/helm/templates/manifests/tiller-clusterrolebinding.yml @@ -0,0 +1,13 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: tiller + namespace: {{ system_namespace }} +subjects: + - kind: ServiceAccount + name: tiller + namespace: {{ system_namespace }} +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io diff --git a/roles/kubernetes-apps/helm/templates/manifests/tiller-sa.yml b/roles/kubernetes-apps/helm/templates/manifests/tiller-sa.yml new file mode 100644 index 000000000..c840f57f8 --- /dev/null +++ b/roles/kubernetes-apps/helm/templates/manifests/tiller-sa.yml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tiller + namespace: {{ system_namespace }} + labels: + kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml index cb7a10c65..5f5fa0194 100644 --- a/roles/kubernetes/node/tasks/install.yml +++ b/roles/kubernetes/node/tasks/install.yml @@ -1,22 +1,8 @@ --- -- name: install | Set SSL CA directories - set_fact: - ssl_ca_dirs: "[ - {% if ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] -%} - '/usr/share/ca-certificates', - {% elif ansible_os_family == 'RedHat' -%} - '/etc/pki/tls', - '/etc/pki/ca-trust', - {% elif ansible_os_family == 'Debian' -%} - '/usr/share/ca-certificates', - {% endif -%} - ]" - tags: facts - - include: "install_{{ kubelet_deployment_type }}.yml" - name: install | Write kubelet systemd init file - template: + template: src: "kubelet.{{ kubelet_deployment_type }}.service.j2" dest: "/etc/systemd/system/kubelet.service" backup: "yes" From 4e95788e1772e690e869e2c2e8ebdee7d7e7f915 Mon Sep 17 00:00:00 2001 From: Raj Perera Date: Mon, 19 Jun 2017 13:16:57 -0400 Subject: [PATCH 16/22] Make rotate_kubernetes_certs default to false --- roles/kargo-defaults/defaults/main.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/kargo-defaults/defaults/main.yaml b/roles/kargo-defaults/defaults/main.yaml index 91602fdaa..2dfb2cd70 100644 --- a/roles/kargo-defaults/defaults/main.yaml +++ b/roles/kargo-defaults/defaults/main.yaml @@ -121,6 +121,9 @@ enable_network_policy: false authorization_mode: ['AlwaysAllow'] rbac_enabled: "{{ 'RBAC' in authorization_mode }}" +## Set this flag to re-create kubernetes node and master certificates !!WARNING!!: Will overwrite existing certs. 
+rotate_kubernetes_certs: false + ssl_ca_dirs: "[ {% if ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] -%} '/usr/share/ca-certificates', From ba41d3ee55a805f53dd7c0837b39a9463ed8e707 Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Tue, 20 Jun 2017 10:54:02 +0800 Subject: [PATCH 17/22] kube-proxy use kubeconfig on kube-master --- .../kubernetes/node/templates/manifests/kube-proxy.manifest.j2 | 3 --- 1 file changed, 3 deletions(-) diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index bbb13bc9d..65feeee65 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -25,10 +25,7 @@ spec: - /hyperkube - proxy - --v={{ kube_log_level }} - - --master={{ kube_apiserver_endpoint }} -{% if not is_kube_master %} - --kubeconfig={{kube_config_dir}}/kube-proxy-kubeconfig.yaml -{% endif %} - --bind-address={{ ip | default(ansible_default_ipv4.address) }} - --cluster-cidr={{ kube_pods_subnet }} - --proxy-mode={{ kube_proxy_mode }} From e58d06ddd1107bc89838c10a9d64f08d5ee5e827 Mon Sep 17 00:00:00 2001 From: Raj Perera Date: Tue, 20 Jun 2017 00:49:33 -0400 Subject: [PATCH 18/22] Remove cert rotation code. Remove disclaimer for supported auth methods. --- docs/vars.md | 5 ----- roles/kargo-defaults/defaults/main.yaml | 5 +---- roles/kubernetes-apps/ansible/defaults/main.yml | 3 +-- roles/kubernetes-apps/ansible/tasks/main.yml | 2 +- roles/kubernetes-apps/helm/tasks/main.yml | 2 +- .../templates/{manifests => }/tiller-clusterrolebinding.yml | 0 .../helm/templates/{manifests => }/tiller-sa.yml | 0 roles/kubernetes/node/tasks/pre_upgrade.yml | 2 +- roles/kubernetes/secrets/defaults/main.yml | 2 -- roles/kubernetes/secrets/tasks/check-certs.yml | 6 +++--- 10 files changed, 8 insertions(+), 19 deletions(-) rename roles/kubernetes-apps/helm/templates/{manifests => }/tiller-clusterrolebinding.yml (100%) rename roles/kubernetes-apps/helm/templates/{manifests => }/tiller-sa.yml (100%) diff --git a/docs/vars.md b/docs/vars.md index f51f71808..3756610fa 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -68,11 +68,6 @@ following default cluster paramters: * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in Kubernetes * *authorization_mode* - A list of authorization modes that the apiserver should be configured. -Supported values are `['AlwaysAllow', 'RBAC']` (Default: `['AlwaysAllow']`) -* *rotate_kubernetes_certs* - Set this to true to regenerate kubernetes node and master certificates. -Useful if the authorization mode was changed and certificate format -needs to be updated. This will not regenerate the root CA. *(!!Warning!!: Will overwrite old certs.)* - Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances' private addresses, make sure to pick another values for ``kube_service_addresses`` diff --git a/roles/kargo-defaults/defaults/main.yaml b/roles/kargo-defaults/defaults/main.yaml index 2dfb2cd70..d13a565aa 100644 --- a/roles/kargo-defaults/defaults/main.yaml +++ b/roles/kargo-defaults/defaults/main.yaml @@ -116,13 +116,10 @@ efk_enabled: false enable_network_policy: false ## List of authorization plugins that must be configured for -## the k8s cluster. Only 'AlwaysAllow' and 'RBAC' is supported -## at the moment. +## the k8s cluster. 
authorization_mode: ['AlwaysAllow'] rbac_enabled: "{{ 'RBAC' in authorization_mode }}" -## Set this flag to re-create kubernetes node and master certificates !!WARNING!!: Will overwrite existing certs. -rotate_kubernetes_certs: false ssl_ca_dirs: "[ {% if ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] -%} diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index 13f8e41a2..60a707fd6 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -41,8 +41,7 @@ netchecker_server_memory_requests: 64M etcd_cert_dir: "/etc/ssl/etcd/ssl" canal_cert_dir: "/etc/canal/certs" -# RBAC specific resources that will be ignored when RBAC is not enabled. -apiserver_rbac_resources: +kubedns_rbac_resources: - clusterrole, - clusterrolebinding, - sa diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index dad3e3c51..1259f7204 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -21,7 +21,7 @@ - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding} - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment} register: manifests - when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and (item.type not in apiserver_rbac_resources or rbac_enabled) + when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and (item.type not in kubedns_rbac_resources or rbac_enabled) tags: dnsmasq # see https://github.com/kubernetes/kubernetes/issues/45084 diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index a161b171b..c6cabf288 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -12,7 +12,7 @@ - name: Helm | Lay Down Helm Manifests (RBAC) template: - src: "manifests/{{item.file}}" + src: "{{item.file}}" dest: "{{kube_config_dir}}/{{item.file}}" with_items: - {name: tiller, file: tiller-sa.yml, type: sa} diff --git a/roles/kubernetes-apps/helm/templates/manifests/tiller-clusterrolebinding.yml b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml similarity index 100% rename from roles/kubernetes-apps/helm/templates/manifests/tiller-clusterrolebinding.yml rename to roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml diff --git a/roles/kubernetes-apps/helm/templates/manifests/tiller-sa.yml b/roles/kubernetes-apps/helm/templates/tiller-sa.yml similarity index 100% rename from roles/kubernetes-apps/helm/templates/manifests/tiller-sa.yml rename to roles/kubernetes-apps/helm/templates/tiller-sa.yml diff --git a/roles/kubernetes/node/tasks/pre_upgrade.yml b/roles/kubernetes/node/tasks/pre_upgrade.yml index 291817562..d6f729890 100644 --- a/roles/kubernetes/node/tasks/pre_upgrade.yml +++ b/roles/kubernetes/node/tasks/pre_upgrade.yml @@ -7,4 +7,4 @@ - name: "Pre-upgrade | Make sure to restart kubelet if certificates changed" command: /bin/true - notify: restart kubelet if secrets changed \ No newline at end of file + notify: restart kubelet if secrets changed diff --git a/roles/kubernetes/secrets/defaults/main.yml b/roles/kubernetes/secrets/defaults/main.yml index 3b65c23a9..e6177857e 100644 --- a/roles/kubernetes/secrets/defaults/main.yml +++ b/roles/kubernetes/secrets/defaults/main.yml @@ -1,4 +1,2 @@ --- kube_cert_group: kube-cert - -rotate_kubernetes_certs: 
false # set this to true to regenerate certificates diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml index 73704caa4..1f2d9cb1d 100644 --- a/roles/kubernetes/secrets/tasks/check-certs.yml +++ b/roles/kubernetes/secrets/tasks/check-certs.yml @@ -25,7 +25,7 @@ - name: "Check_certs | Set 'gen_certs' to true" set_fact: gen_certs: true - when: "rotate_kubernetes_certs or item not in (kubecert_master.files|map(attribute='path')|list)" + when: "item not in (kubecert_master.files|map(attribute='path')|list)" run_once: true with_items: >- ['{{ kube_cert_dir }}/ca.pem', @@ -41,7 +41,7 @@ {% set existing_certs = kubecert_master.files|map(attribute='path')|list|sort %} {% for host in groups['k8s-cluster'] -%} {% set host_cert = "%s/node-%s-key.pem"|format(kube_cert_dir, host) %} - {% if host_cert in existing_certs and not rotate_kubernetes_certs -%} + {% if host_cert in existing_certs -%} "{{ host }}": False, {% else -%} "{{ host }}": True, @@ -62,5 +62,5 @@ (kubecert_node.results[1].stat.checksum|default('') != kubecert_master.files|selectattr("path", "equalto", kubecert_node.results[1].stat.path)|map(attribute="checksum")|first|default('')) -%} {%- set _ = certs.update({'sync': True}) -%} {% endif %} - {{ rotate_kubernetes_certs or certs.sync }} + {{ certs.sync }} From 41298ccea57bafb13ff039d91a55a29eebe86c7f Mon Sep 17 00:00:00 2001 From: Raj Perera Date: Tue, 20 Jun 2017 13:29:31 -0400 Subject: [PATCH 19/22] Reverted leftover tasks from cert rotation functionality. --- roles/kargo-defaults/defaults/main.yaml | 17 +++-------------- .../ansible/templates/kubedns-autoscaler.yml | 1 - roles/kubernetes/node/handlers/main.yml | 5 ----- roles/kubernetes/node/tasks/install.yml | 14 ++++++++++++++ roles/kubernetes/node/tasks/pre_upgrade.yml | 4 ---- .../secrets/tasks/gen_certs_script.yml | 2 ++ 6 files changed, 19 insertions(+), 24 deletions(-) diff --git a/roles/kargo-defaults/defaults/main.yaml b/roles/kargo-defaults/defaults/main.yaml index d13a565aa..3a1d29667 100644 --- a/roles/kargo-defaults/defaults/main.yaml +++ b/roles/kargo-defaults/defaults/main.yaml @@ -115,19 +115,8 @@ k8s_image_pull_policy: IfNotPresent efk_enabled: false enable_network_policy: false -## List of authorization plugins that must be configured for -## the k8s cluster. +## List of authorization modes that must be configured for +## the k8s cluster. Only 'AlwaysAllow','AlwaysDeny', and +## 'RBAC' modes are tested. 
authorization_mode: ['AlwaysAllow'] rbac_enabled: "{{ 'RBAC' in authorization_mode }}" - - -ssl_ca_dirs: "[ - {% if ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] -%} - '/usr/share/ca-certificates', - {% elif ansible_os_family == 'RedHat' -%} - '/etc/pki/tls', - '/etc/pki/ca-trust', - {% elif ansible_os_family == 'Debian' -%} - '/usr/share/ca-certificates', - {% endif -%} - ]" diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml index 28a67af4f..ecde5dce2 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml @@ -47,4 +47,3 @@ spec: - --logtostderr=true - --v=2 serviceAccountName: cluster-proportional-autoscaler - serviceAccount: cluster-proportional-autoscaler diff --git a/roles/kubernetes/node/handlers/main.yml b/roles/kubernetes/node/handlers/main.yml index 079cbd3da..00525b995 100644 --- a/roles/kubernetes/node/handlers/main.yml +++ b/roles/kubernetes/node/handlers/main.yml @@ -1,9 +1,4 @@ --- -- name: restart kubelet if secrets changed - command: /bin/true - when: secret_changed|d(False) - notify: restart kubelet - - name: restart kubelet command: /bin/true notify: diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml index 5f5fa0194..ad4cbacf1 100644 --- a/roles/kubernetes/node/tasks/install.yml +++ b/roles/kubernetes/node/tasks/install.yml @@ -1,4 +1,18 @@ --- +- name: install | Set SSL CA directories + set_fact: + ssl_ca_dirs: "[ + {% if ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] -%} + '/usr/share/ca-certificates', + {% elif ansible_os_family == 'RedHat' -%} + '/etc/pki/tls', + '/etc/pki/ca-trust', + {% elif ansible_os_family == 'Debian' -%} + '/usr/share/ca-certificates', + {% endif -%} + ]" + tags: facts + - include: "install_{{ kubelet_deployment_type }}.yml" - name: install | Write kubelet systemd init file diff --git a/roles/kubernetes/node/tasks/pre_upgrade.yml b/roles/kubernetes/node/tasks/pre_upgrade.yml index d6f729890..612dd3e6f 100644 --- a/roles/kubernetes/node/tasks/pre_upgrade.yml +++ b/roles/kubernetes/node/tasks/pre_upgrade.yml @@ -4,7 +4,3 @@ args: creates: "/var/lib/cni" failed_when: false - -- name: "Pre-upgrade | Make sure to restart kubelet if certificates changed" - command: /bin/true - notify: restart kubelet if secrets changed diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index 54afed35d..61d9c7826 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -136,6 +136,7 @@ - name: Gen_certs | Unpack certs on masters shell: "base64 -d < {{ cert_tempfile.stdout }} | tar xz -C {{ kube_cert_dir }}" no_log: true + changed_when: false check_mode: no when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and inventory_hostname != groups['kube-master'][0] @@ -153,6 +154,7 @@ args: executable: /bin/bash no_log: true + changed_when: false check_mode: no when: inventory_hostname in groups['kube-node'] and sync_certs|default(false) and From 33de4def2f975099d679be2a28747c2c5a05309d Mon Sep 17 00:00:00 2001 From: Raj Perera Date: Tue, 20 Jun 2017 13:31:44 -0400 Subject: [PATCH 20/22] Add note about auth modes --- docs/vars.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/vars.md b/docs/vars.md index 3756610fa..7608af347 100644 --- 
a/docs/vars.md +++ b/docs/vars.md @@ -67,7 +67,10 @@ following default cluster paramters: OpenStack (default is unset) * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in Kubernetes -* *authorization_mode* - A list of authorization modes that the apiserver should be configured. +* *authorization_mode* - A list of [authorization modes]( +https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module) + that the cluster should be configured for. Defaults to `['AlwaysAllow']`. + Note: Only `AlwaysAllow`, `AlwaysDeny` and `RBAC` are tested. Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances' private addresses, make sure to pick another values for ``kube_service_addresses`` From 27a10c96238adeb164d762c60d080b0e9dfadbc7 Mon Sep 17 00:00:00 2001 From: Raj Perera Date: Tue, 20 Jun 2017 13:44:14 -0400 Subject: [PATCH 21/22] Whitespace fixes --- docs/vars.md | 1 - roles/kubernetes-apps/helm/tasks/main.yml | 1 - roles/kubernetes/master/defaults/main.yml | 1 - roles/kubernetes/secrets/tasks/check-certs.yml | 2 +- 4 files changed, 1 insertion(+), 4 deletions(-) diff --git a/docs/vars.md b/docs/vars.md index 7608af347..c2cfbd3a0 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -120,4 +120,3 @@ The possible vars are: Kargo sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their passwords default to changeme. You can set this by changing ``kube_api_pwd``. - diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index c6cabf288..c70787664 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -43,4 +43,3 @@ - name: Helm | Set up bash completion shell: "umask 022 && {{ bin_dir }}/helm completion >/etc/bash_completion.d/helm.sh" when: ( helm_container.changed and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] ) - diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 0536432e5..7cfe9cc9a 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -65,4 +65,3 @@ apiserver_custom_flags: [] controller_mgr_custom_flags: [] scheduler_custom_flags: [] - diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml index 1f2d9cb1d..061b04b04 100644 --- a/roles/kubernetes/secrets/tasks/check-certs.yml +++ b/roles/kubernetes/secrets/tasks/check-certs.yml @@ -25,7 +25,7 @@ - name: "Check_certs | Set 'gen_certs' to true" set_fact: gen_certs: true - when: "item not in (kubecert_master.files|map(attribute='path')|list)" + when: "not item in kubecert_master.files|map(attribute='path') | list" run_once: true with_items: >- ['{{ kube_cert_dir }}/ca.pem', From 4cfffba749edb1ccec1103d1c264ab6cee64f353 Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Wed, 21 Jun 2017 11:30:15 +0800 Subject: [PATCH 22/22] minor tune after merge @rajiteh's work --- docs/vars.md | 2 +- roles/kargo-defaults/defaults/main.yaml | 4 ++-- roles/kubernetes-apps/ansible/defaults/main.yml | 5 ++--- roles/kubernetes-apps/ansible/tasks/main.yml | 4 +++- .../kubernetes-apps/ansible/templates/kubedns-autoscaler.yml | 2 ++ .../master/templates/manifests/kube-apiserver.manifest.j2 | 4 ++-- 6 files changed, 12 insertions(+), 9 deletions(-) diff --git a/docs/vars.md b/docs/vars.md index c2cfbd3a0..8976bc695 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -67,7 +67,7 @@ following default cluster paramters: 
OpenStack (default is unset) * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in Kubernetes -* *authorization_mode* - A list of [authorization modes]( +* *authorization_modes* - A list of [authorization mode]( https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module) that the cluster should be configured for. Defaults to `['AlwaysAllow']`. Note: Only `AlwaysAllow`, `AlwaysDeny` and `RBAC` are tested. diff --git a/roles/kargo-defaults/defaults/main.yaml b/roles/kargo-defaults/defaults/main.yaml index 3a1d29667..14d513458 100644 --- a/roles/kargo-defaults/defaults/main.yaml +++ b/roles/kargo-defaults/defaults/main.yaml @@ -118,5 +118,5 @@ enable_network_policy: false ## List of authorization modes that must be configured for ## the k8s cluster. Only 'AlwaysAllow','AlwaysDeny', and ## 'RBAC' modes are tested. -authorization_mode: ['AlwaysAllow'] -rbac_enabled: "{{ 'RBAC' in authorization_mode }}" +authorization_modes: ['AlwaysAllow'] +rbac_enabled: "{{ 'RBAC' in authorization_modes }}" diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index 60a707fd6..1263f9df2 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -42,7 +42,6 @@ etcd_cert_dir: "/etc/ssl/etcd/ssl" canal_cert_dir: "/etc/canal/certs" kubedns_rbac_resources: - - clusterrole, - - clusterrolebinding, + - clusterrole + - clusterrolebinding - sa - - serviceaccount diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 1259f7204..27e453032 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -21,7 +21,9 @@ - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding} - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment} register: manifests - when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and (item.type not in kubedns_rbac_resources or rbac_enabled) + when: + - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] + - rbac_enabled or item.type not in kubedns_rbac_resources tags: dnsmasq # see https://github.com/kubernetes/kubernetes/issues/45084 diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml index ecde5dce2..5d6190f93 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml @@ -46,4 +46,6 @@ spec: - --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}} - --logtostderr=true - --v=2 +{% if rbac_enabled %} serviceAccountName: cluster-proportional-autoscaler +{% endif %} diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index fddd66a27..07a48e728 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -81,8 +81,8 @@ spec: {% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %} - --anonymous-auth={{ kube_api_anonymous_auth }} {% endif %} -{% if authorization_mode %} - - --authorization-mode={{ authorization_mode|join(',') }} +{% if authorization_modes %} + - 
--authorization-mode={{ authorization_modes|join(',') }} {% endif %} {% if apiserver_custom_flags is string %} - {{ apiserver_custom_flags }}
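
Illustrative only — a minimal sketch of how the variables introduced across these patches compose, assuming the override is placed wherever the inventory normally defines cluster variables (e.g. group_vars; that location is an assumption, not shown in these diffs). All variable names below come from the series itself:

    # assumed inventory override
    authorization_modes: ['AlwaysAllow', 'RBAC']

    # derived default from roles/kargo-defaults/defaults/main.yaml (patch 22)
    rbac_enabled: "{{ 'RBAC' in authorization_modes }}"    # evaluates to true for this list

    # flag rendered by kube-apiserver.manifest.j2:
    #   --authorization-mode=AlwaysAllow,RBAC

With 'RBAC' present in the list, rbac_enabled gates the RBAC-only resources touched in this series: the tiller ServiceAccount and ClusterRoleBinding, the serviceAccountName fields on kube-dns and the cluster-proportional-autoscaler, and --use-service-account-credentials on the controller manager.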