From 092bf07cbf57602a289330d0a1971f16f5d8e38e Mon Sep 17 00:00:00 2001
From: jwfang <54740235@qq.com>
Date: Tue, 27 Jun 2017 12:27:25 +0800
Subject: [PATCH] basic rbac support

---
 docs/vars.md                                  |  4 ++
 .../kubernetes-apps/ansible/defaults/main.yml |  4 ++
 roles/kubernetes-apps/ansible/tasks/main.yml  | 30 ++++++++++++--
 .../kubedns-autoscaler-clusterrole.yml        | 32 +++++++++++++++
 .../kubedns-autoscaler-clusterrolebinding.yml | 27 ++++++++++++
 .../templates/kubedns-autoscaler-sa.yml       | 19 +++++++++
 .../ansible/templates/kubedns-autoscaler.yml  |  8 ++--
 .../ansible/templates/kubedns-deploy.yml      |  4 +-
 .../ansible/templates/kubedns-sa.yml          |  7 ++++
 roles/kubernetes-apps/helm/tasks/main.yml     | 26 ++++++++++++
 .../templates/tiller-clusterrolebinding.yml   | 13 ++++++
 .../helm/templates/tiller-sa.yml              |  7 ++++
 roles/kubernetes/master/defaults/main.yml     |  2 +-
 roles/kubernetes/master/tasks/main.yml        | 22 +++++++---
 ...kube-controller-manager-kubeconfig.yaml.j2 | 18 ++++++++
 .../kube-scheduler-kubeconfig.yaml.j2         | 18 ++++++++
 .../manifests/kube-apiserver.manifest.j2      |  3 ++
 .../kube-controller-manager.manifest.j2       | 35 ++++++++++++----
 .../manifests/kube-scheduler.manifest.j2      | 28 ++++++++++++-
 roles/kubernetes/node/tasks/install.yml       |  2 +-
 roles/kubernetes/node/tasks/main.yml          |  7 +++-
 .../templates/kube-proxy-kubeconfig.yaml.j2   | 18 ++++++++
 .../manifests/kube-proxy.manifest.j2          | 27 ++++++------
 .../kubernetes/preinstall/tasks/set_facts.yml |  2 +-
 roles/kubernetes/secrets/files/make-ssl.sh    | 41 +++++++++++++------
 .../secrets/tasks/gen_certs_script.yml        | 23 +++++++++--
 roles/kubespray-defaults/defaults/main.yaml   |  6 +++
 27 files changed, 374 insertions(+), 59 deletions(-)
 create mode 100644 roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml
 create mode 100644 roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml
 create mode 100644 roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml
 create mode 100644 roles/kubernetes-apps/ansible/templates/kubedns-sa.yml
 create mode 100644 roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml
 create mode 100644 roles/kubernetes-apps/helm/templates/tiller-sa.yml
 create mode 100644 roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2
 create mode 100644 roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2
 create mode 100644 roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2

diff --git a/docs/vars.md b/docs/vars.md
index 4b9da186e..46684395f 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -67,6 +67,10 @@ following default cluster parameters:
   OpenStack (default is unset)
 * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
   Kubernetes
+* *authorization_modes* - A list of [authorization modes](
+https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
+  that the cluster should be configured with. Defaults to `['AlwaysAllow']`.
+  Note: Only `AlwaysAllow`, `AlwaysDeny` and `RBAC` are tested.
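+
+  For example, to run the cluster with RBAC as the only authorization mode,
+  override this in the inventory (e.g. in a group_vars file):
+
+  ```
+  authorization_modes: ['RBAC']
+  ```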
Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances' private addresses, make sure to pick another values for ``kube_service_addresses`` diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index 2787472c8..97d1bcdc4 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -41,3 +41,7 @@ netchecker_server_memory_requests: 64M etcd_cert_dir: "/etc/ssl/etcd/ssl" canal_cert_dir: "/etc/canal/certs" +kubedns_rbac_resources: + - clusterrole + - clusterrolebinding + - sa diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 4e7236df6..00a1fd74d 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -13,11 +13,34 @@ src: "{{item.file}}" dest: "{{kube_config_dir}}/{{item.file}}" with_items: - - {name: kube-dns, file: kubedns-deploy.yml, type: deployment} - - {name: kube-dns, file: kubedns-svc.yml, type: svc} + - {name: kubedns, file: kubedns-sa.yml, type: sa} + - {name: kubedns, file: kubedns-deploy.yml, type: deployment} + - {name: kubedns, file: kubedns-svc.yml, type: svc} + - {name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa} + - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole} + - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding} - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment} register: manifests - when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] + when: + - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] + - rbac_enabled or item.type not in kubedns_rbac_resources + tags: dnsmasq + +# see https://github.com/kubernetes/kubernetes/issues/45084 +# TODO: this is only needed for "old" kube-dns +- name: Kubernetes Apps | Patch system:kube-dns ClusterRole + command: > + {{bin_dir}}/kubectl patch clusterrole system:kube-dns + --patch='{ + "rules": [ + { + "apiGroups" : [""], + "resources" : ["endpoints", "services"], + "verbs": ["list", "watch", "get"] + } + ] + }' + when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled tags: dnsmasq - name: Kubernetes Apps | Start Resources @@ -29,6 +52,7 @@ filename: "{{kube_config_dir}}/{{item.item.file}}" state: "{{item.changed | ternary('latest','present') }}" with_items: "{{ manifests.results }}" + failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] tags: dnsmasq diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml new file mode 100644 index 000000000..a194426c6 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml @@ -0,0 +1,32 @@ +# Copyright 2016 The Kubernetes Authors. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: cluster-proportional-autoscaler + namespace: {{ system_namespace }} +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list"] + - apiGroups: [""] + resources: ["replicationcontrollers/scale"] + verbs: ["get", "update"] + - apiGroups: ["extensions"] + resources: ["deployments/scale", "replicasets/scale"] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml new file mode 100644 index 000000000..a368ae333 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml @@ -0,0 +1,27 @@ +# Copyright 2016 The Kubernetes Authors. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: cluster-proportional-autoscaler + namespace: {{ system_namespace }} +subjects: + - kind: ServiceAccount + name: cluster-proportional-autoscaler + namespace: {{ system_namespace }} +roleRef: + kind: ClusterRole + name: cluster-proportional-autoscaler + apiGroup: rbac.authorization.k8s.io diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml new file mode 100644 index 000000000..9544a7dd9 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml @@ -0,0 +1,19 @@ +# Copyright 2016 The Kubernetes Authors. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
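+
+# ServiceAccount for the kubedns cluster-proportional-autoscaler. The
+# ClusterRoleBinding above binds it to the autoscaler ClusterRole, and the
+# kubedns-autoscaler Deployment runs under it when rbac_enabled is set.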
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+  name: cluster-proportional-autoscaler
+  namespace: {{ system_namespace }}
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml
index a1d5455ad..9e0462290 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml
@@ -16,7 +16,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kubedns-autoscaler
-  namespace: kube-system
+  namespace: {{ system_namespace }}
   labels:
     k8s-app: kubedns-autoscaler
     kubernetes.io/cluster-service: "true"
@@ -39,11 +39,13 @@ spec:
             memory: "10Mi"
         command:
           - /cluster-proportional-autoscaler
-          - --namespace=kube-system
+          - --namespace={{ system_namespace }}
           - --configmap=kubedns-autoscaler
           # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
           - --target=Deployment/kube-dns
           - --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
           - --logtostderr=true
           - --v=2
-
+{% if rbac_enabled %}
+      serviceAccountName: cluster-proportional-autoscaler
+{% endif %}
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml
index 3f07aa905..7e4615676 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml
@@ -151,4 +151,6 @@ spec:
             memory: 20Mi
             cpu: 10m
       dnsPolicy: Default  # Don't use cluster DNS.
-
+{% if rbac_enabled %}
+      serviceAccountName: kube-dns
+{% endif %}
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml
new file mode 100644
index 000000000..e520ccbfc
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kube-dns
+  namespace: {{ system_namespace }}
+  labels:
+    kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index f12875da2..2d26c5a0f 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -10,10 +10,36 @@
     mode: 0755
   register: helm_container

+- name: Helm | Lay Down Helm Manifests (RBAC)
+  template:
+    src: "{{item.file}}"
+    dest: "{{kube_config_dir}}/{{item.file}}"
+  with_items:
+    - {name: tiller, file: tiller-sa.yml, type: sa}
+    - {name: tiller, file: tiller-clusterrolebinding.yml, type: clusterrolebinding}
+  register: manifests
+  when: inventory_hostname == groups['kube-master'][0] and rbac_enabled
+
+- name: Helm | Apply Helm Manifests (RBAC)
+  kube:
+    name: "{{item.item.name}}"
+    namespace: "{{ system_namespace }}"
+    kubectl: "{{bin_dir}}/kubectl"
+    resource: "{{item.item.type}}"
+    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    state: "{{item.changed | ternary('latest','present') }}"
+  with_items: "{{ manifests.results }}"
+  failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
+  when: inventory_hostname == groups['kube-master'][0] and rbac_enabled
+
 - name: Helm | Install/upgrade helm
   command: "{{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }}"
   when: helm_container.changed

+- name: Helm | Patch tiller deployment for RBAC
+  command: >
+    {{ bin_dir }}/kubectl patch deployment 
tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}' -n {{ system_namespace }} + when: rbac_enabled + - name: Helm | Set up bash completion shell: "umask 022 && {{ bin_dir }}/helm completion bash >/etc/bash_completion.d/helm.sh" when: ( helm_container.changed and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] ) diff --git a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml new file mode 100644 index 000000000..0ac9341ee --- /dev/null +++ b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml @@ -0,0 +1,13 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: tiller + namespace: {{ system_namespace }} +subjects: + - kind: ServiceAccount + name: tiller + namespace: {{ system_namespace }} +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io diff --git a/roles/kubernetes-apps/helm/templates/tiller-sa.yml b/roles/kubernetes-apps/helm/templates/tiller-sa.yml new file mode 100644 index 000000000..c840f57f8 --- /dev/null +++ b/roles/kubernetes-apps/helm/templates/tiller-sa.yml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tiller + namespace: {{ system_namespace }} + labels: + kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 785ef43af..7cfe9cc9a 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -64,4 +64,4 @@ apiserver_custom_flags: [] controller_mgr_custom_flags: [] -scheduler_custom_flags: [] \ No newline at end of file +scheduler_custom_flags: [] diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml index dadef4bf5..6922e6a51 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -60,12 +60,11 @@ when: kubesystem|failed and inventory_hostname == groups['kube-master'][0] tags: apps -- name: Write kube-controller-manager manifest +- name: Write kube-scheduler kubeconfig template: - src: manifests/kube-controller-manager.manifest.j2 - dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest" - notify: Master | wait for kube-controller-manager - tags: kube-controller-manager + src: kube-scheduler-kubeconfig.yaml.j2 + dest: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml" + tags: kube-scheduler - name: Write kube-scheduler manifest template: @@ -74,6 +73,19 @@ notify: Master | wait for kube-scheduler tags: kube-scheduler +- name: Write kube-controller-manager kubeconfig + template: + src: kube-controller-manager-kubeconfig.yaml.j2 + dest: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml" + tags: kube-controller-manager + +- name: Write kube-controller-manager manifest + template: + src: manifests/kube-controller-manager.manifest.j2 + dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest" + notify: Master | wait for kube-controller-manager + tags: kube-controller-manager + - include: post-upgrade.yml tags: k8s-post-upgrade diff --git a/roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2 b/roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2 new file mode 100644 index 000000000..887d022c1 --- /dev/null +++ b/roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Config +clusters: +- 
name: local + cluster: + certificate-authority: {{ kube_cert_dir }}/ca.pem + server: {{ kube_apiserver_endpoint }} +users: +- name: kube-controller-manager + user: + client-certificate: {{ kube_cert_dir }}/kube-controller-manager.pem + client-key: {{ kube_cert_dir }}/kube-controller-manager-key.pem +contexts: +- context: + cluster: local + user: kube-controller-manager + name: kube-controller-manager-{{ cluster_name }} +current-context: kube-controller-manager-{{ cluster_name }} diff --git a/roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2 b/roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2 new file mode 100644 index 000000000..974b72427 --- /dev/null +++ b/roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: {{ kube_cert_dir }}/ca.pem + server: {{ kube_apiserver_endpoint }} +users: +- name: kube-scheduler + user: + client-certificate: {{ kube_cert_dir }}/kube-scheduler.pem + client-key: {{ kube_cert_dir }}/kube-scheduler-key.pem +contexts: +- context: + cluster: local + user: kube-scheduler + name: kube-scheduler-{{ cluster_name }} +current-context: kube-scheduler-{{ cluster_name }} diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index bf4979596..24094fefb 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -81,6 +81,9 @@ spec: {% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %} - --anonymous-auth={{ kube_api_anonymous_auth }} {% endif %} +{% if authorization_modes %} + - --authorization-mode={{ authorization_modes|join(',') }} +{% endif %} {% if apiserver_custom_flags is string %} - {{ apiserver_custom_flags }} {% else %} diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index d3f8a23a5..a6b69fa14 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -24,7 +24,7 @@ spec: command: - /hyperkube - controller-manager - - --master={{ kube_apiserver_endpoint }} + - --kubeconfig={{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml - --leader-elect=true - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem - --root-ca-file={{ kube_cert_dir }}/ca.pem @@ -35,6 +35,9 @@ spec: - --node-monitor-period={{ kube_controller_node_monitor_period }} - --pod-eviction-timeout={{ kube_controller_pod_eviction_timeout }} - --v={{ kube_log_level }} +{% if rbac_enabled %} + - --use-service-account-credentials +{% endif %} {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} - --cloud-provider={{cloud_provider}} - --cloud-config={{ kube_config_dir }}/cloud_config @@ -61,20 +64,36 @@ spec: initialDelaySeconds: 30 timeoutSeconds: 10 volumeMounts: - - mountPath: {{ kube_cert_dir }} - name: ssl-certs-kubernetes + - mountPath: /etc/ssl/certs + name: ssl-certs-host + readOnly: true + - mountPath: "{{kube_config_dir}}/ssl" + name: etc-kube-ssl + readOnly: true + - mountPath: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml" + name: kubeconfig readOnly: true {% if cloud_provider 
is defined and cloud_provider in ["openstack", "azure", "vsphere" ] %} - - mountPath: {{ kube_config_dir }}/cloud_config + - mountPath: "{{ kube_config_dir }}/cloud_config" name: cloudconfig readOnly: true {% endif %} volumes: - - hostPath: - path: {{ kube_cert_dir }} - name: ssl-certs-kubernetes + - name: ssl-certs-host + hostPath: +{% if ansible_os_family == 'RedHat' %} + path: /etc/pki/tls +{% else %} + path: /usr/share/ca-certificates +{% endif %} + - name: etc-kube-ssl + hostPath: + path: "{{ kube_config_dir }}/ssl" + - name: kubeconfig + hostPath: + path: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml" {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} - hostPath: - path: {{ kube_config_dir }}/cloud_config + path: "{{ kube_config_dir }}/cloud_config" name: cloudconfig {% endif %} diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 index 441f991eb..fdc16bf7f 100644 --- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-scheduler - namespace: kube-system + namespace: {{ system_namespace }} labels: k8s-app: kube-scheduler spec: @@ -25,7 +25,7 @@ spec: - /hyperkube - scheduler - --leader-elect=true - - --master={{ kube_apiserver_endpoint }} + - --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml - --v={{ kube_log_level }} {% if scheduler_custom_flags is string %} - {{ scheduler_custom_flags }} @@ -41,3 +41,27 @@ spec: port: 10251 initialDelaySeconds: 30 timeoutSeconds: 10 + volumeMounts: + - mountPath: /etc/ssl/certs + name: ssl-certs-host + readOnly: true + - mountPath: "{{ kube_config_dir }}/ssl" + name: etc-kube-ssl + readOnly: true + - mountPath: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml" + name: kubeconfig + readOnly: true + volumes: + - name: ssl-certs-host + hostPath: +{% if ansible_os_family == 'RedHat' %} + path: /etc/pki/tls +{% else %} + path: /usr/share/ca-certificates +{% endif %} + - name: etc-kube-ssl + hostPath: + path: "{{ kube_config_dir }}/ssl" + - name: kubeconfig + hostPath: + path: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml" diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml index cb7a10c65..ad4cbacf1 100644 --- a/roles/kubernetes/node/tasks/install.yml +++ b/roles/kubernetes/node/tasks/install.yml @@ -16,7 +16,7 @@ - include: "install_{{ kubelet_deployment_type }}.yml" - name: install | Write kubelet systemd init file - template: + template: src: "kubelet.{{ kubelet_deployment_type }}.service.j2" dest: "/etc/systemd/system/kubelet.service" backup: "yes" diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index f09845f76..e0558f8cd 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -30,9 +30,12 @@ - name: write the kubecfg (auth) file for kubelet template: - src: node-kubeconfig.yaml.j2 - dest: "{{ kube_config_dir }}/node-kubeconfig.yaml" + src: "{{ item }}-kubeconfig.yaml.j2" + dest: "{{ kube_config_dir }}/{{ item }}-kubeconfig.yaml" backup: yes + with_items: + - node + - kube-proxy notify: restart kubelet tags: kubelet diff --git a/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 b/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 new file mode 100644 index 
000000000..18c47cd3e --- /dev/null +++ b/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: {{ kube_cert_dir }}/ca.pem + server: {{ kube_apiserver_endpoint }} +users: +- name: kube-proxy + user: + client-certificate: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}.pem + client-key: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}-key.pem +contexts: +- context: + cluster: local + user: kube-proxy + name: kube-proxy-{{ cluster_name }} +current-context: kube-proxy-{{ cluster_name }} diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index 9b7d53857..65feeee65 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -25,10 +25,7 @@ spec: - /hyperkube - proxy - --v={{ kube_log_level }} - - --master={{ kube_apiserver_endpoint }} -{% if not is_kube_master %} - - --kubeconfig={{kube_config_dir}}/node-kubeconfig.yaml -{% endif %} + - --kubeconfig={{kube_config_dir}}/kube-proxy-kubeconfig.yaml - --bind-address={{ ip | default(ansible_default_ipv4.address) }} - --cluster-cidr={{ kube_pods_subnet }} - --proxy-mode={{ kube_proxy_mode }} @@ -41,14 +38,14 @@ spec: - mountPath: /etc/ssl/certs name: ssl-certs-host readOnly: true - - mountPath: {{kube_config_dir}}/node-kubeconfig.yaml - name: "kubeconfig" + - mountPath: "{{ kube_config_dir }}/ssl" + name: etc-kube-ssl readOnly: true - - mountPath: {{kube_config_dir}}/ssl - name: "etc-kube-ssl" + - mountPath: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml" + name: kubeconfig readOnly: true - mountPath: /var/run/dbus - name: "var-run-dbus" + name: var-run-dbus readOnly: false volumes: - name: ssl-certs-host @@ -58,12 +55,12 @@ spec: {% else %} path: /usr/share/ca-certificates {% endif %} - - name: "kubeconfig" + - name: etc-kube-ssl hostPath: - path: "{{kube_config_dir}}/node-kubeconfig.yaml" - - name: "etc-kube-ssl" + path: "{{ kube_config_dir }}/ssl" + - name: kubeconfig hostPath: - path: "{{kube_config_dir}}/ssl" - - name: "var-run-dbus" + path: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml" + - name: var-run-dbus hostPath: - path: "/var/run/dbus" + path: /var/run/dbus diff --git a/roles/kubernetes/preinstall/tasks/set_facts.yml b/roles/kubernetes/preinstall/tasks/set_facts.yml index 03057829d..edfac2e2e 100644 --- a/roles/kubernetes/preinstall/tasks/set_facts.yml +++ b/roles/kubernetes/preinstall/tasks/set_facts.yml @@ -23,7 +23,7 @@ {% if not is_kube_master and loadbalancer_apiserver_localhost|default(false) -%} https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }} {%- elif is_kube_master -%} - http://127.0.0.1:{{ kube_apiserver_insecure_port }} + https://127.0.0.1:{{ kube_apiserver_port }} {%- else -%} {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%} https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }} diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh index 55ea13d1e..e8574cc6b 100755 --- a/roles/kubernetes/secrets/files/make-ssl.sh +++ b/roles/kubernetes/secrets/files/make-ssl.sh @@ -72,32 +72,47 @@ else openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1 fi 
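+# gen_key_and_cert <name> <subject>: create ${name}-key.pem (2048-bit RSA),
+# a CSR for <subject>, and a CA-signed ${name}.pem valid for 10 years, with
+# v3_req extensions/SANs taken from ${CONFIG}.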
+gen_key_and_cert() { + local name=$1 + local subject=$2 + openssl genrsa -out ${name}-key.pem 2048 > /dev/null 2>&1 + openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1 + openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 3650 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1 +} + if [ ! -e "$SSLDIR/ca-key.pem" ]; then - # kube-apiserver key - openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1 - openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1 - openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 3650 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1 + # kube-apiserver + gen_key_and_cert "apiserver" "/CN=kube-apiserver" cat ca.pem >> apiserver.pem + # kube-scheduler + gen_key_and_cert "kube-scheduler" "/CN=system:kube-scheduler" + # kube-controller-manager + gen_key_and_cert "kube-controller-manager" "/CN=system:kube-controller-manager" fi +# Admins if [ -n "$MASTERS" ]; then for host in $MASTERS; do cn="${host%%.*}" - # admin key - openssl genrsa -out admin-${host}-key.pem 2048 > /dev/null 2>&1 - openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=kube-admin-${cn}/O=system:masters" > /dev/null 2>&1 - openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 3650 > /dev/null 2>&1 + # admin + gen_key_and_cert "admin-${host}" "/CN=kube-admin-${cn}/O=system:masters" done fi -# Nodes and Admin +# Nodes if [ -n "$HOSTS" ]; then for host in $HOSTS; do cn="${host%%.*}" - # node key - openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1 - openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=kube-node-${cn}" > /dev/null 2>&1 - openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 3650 > /dev/null 2>&1 + gen_key_and_cert "node-${host}" "/CN=system:node:${cn}/O=system:nodes" + done +fi + +# system:kube-proxy +if [ -n "$HOSTS" ]; then + for host in $HOSTS; do + cn="${host%%.*}" + # kube-proxy + gen_key_and_cert "kube-proxy-${host}" "/CN=system:kube-proxy" done fi diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index 8df2195bf..61d9c7826 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -56,24 +56,39 @@ - set_fact: all_master_certs: "['ca-key.pem', + 'apiserver.pem', + 'apiserver-key.pem', + 'kube-scheduler.pem', + 'kube-scheduler-key.pem', + 'kube-controller-manager.pem', + 'kube-controller-manager-key.pem', {% for node in groups['kube-master'] %} 'admin-{{ node }}.pem', 'admin-{{ node }}-key.pem', - 'apiserver.pem', - 'apiserver-key.pem', {% endfor %}]" my_master_certs: ['ca-key.pem', 'admin-{{ inventory_hostname }}.pem', 'admin-{{ inventory_hostname }}-key.pem', 'apiserver.pem', - 'apiserver-key.pem' + 'apiserver-key.pem', + 'kube-scheduler.pem', + 'kube-scheduler-key.pem', + 'kube-controller-manager.pem', + 'kube-controller-manager-key.pem', ] all_node_certs: "['ca.pem', {% for node in groups['k8s-cluster'] %} 'node-{{ node }}.pem', 'node-{{ node }}-key.pem', + 'kube-proxy-{{ node }}.pem', + 'kube-proxy-{{ node }}-key.pem', {% endfor %}]" - my_node_certs: ['ca.pem', 'node-{{ inventory_hostname }}.pem', 'node-{{ inventory_hostname 
}}-key.pem']
+    my_node_certs: ['ca.pem',
+                    'node-{{ inventory_hostname }}.pem',
+                    'node-{{ inventory_hostname }}-key.pem',
+                    'kube-proxy-{{ inventory_hostname }}.pem',
+                    'kube-proxy-{{ inventory_hostname }}-key.pem',
+                    ]
   tags: facts

 - name: Gen_certs | Gather master certs
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index c18afd39b..db5fc1997 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -114,3 +114,9 @@ vault_deployment_type: docker
 k8s_image_pull_policy: IfNotPresent
 efk_enabled: false
 enable_network_policy: false
+
+## List of authorization modes that the cluster should be
+## configured with. Only 'AlwaysAllow', 'AlwaysDeny' and
+## 'RBAC' modes are tested.
+authorization_modes: ['AlwaysAllow']
+rbac_enabled: "{{ 'RBAC' in authorization_modes }}"
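
For reference, the wiring above can be traced end to end (a sketch using only
the variables this patch introduces; no other settings are assumed):

    # roles/kubespray-defaults/defaults/main.yaml leaves behavior unchanged:
    authorization_modes: ['AlwaysAllow']   # => rbac_enabled evaluates to false
    # an RBAC-only cluster would instead set, e.g. in inventory group vars:
    authorization_modes: ['RBAC']          # => rbac_enabled evaluates to true
    # kube-apiserver.manifest.j2 then renders:
    #   - --authorization-mode=RBAC
    # and rbac_enabled gates the serviceAccountName fields, the kubedns/tiller
    # RBAC manifests, and the system:kube-dns ClusterRole patch above.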