Basic RBAC functionality. (Based on work done by @jwfang (#1351))

* Add a flag "authorization_method", when set to "RBAC" enables role based access control.
* Add required cluster roles and bindings for kube-dns
* Patch tiller deployment to use a service account with proper credentials.
* Add a flag to regenerate Kubernetes certs on the nodes (see the group_vars sketch below for both new flags).
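A minimal sketch of how these options could be enabled from an inventory, assuming the usual cluster-wide group_vars file (the path and values below are illustrative; the variable names come from this change):

# inventory/group_vars/k8s-cluster.yml (illustrative path)
# Enable role based access control for the API server
authorization_mode: RBAC
# Regenerate node certificates on the next run; overwrites existing certs
rotate_kubernetes_certs: true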
Raj Perera 2017-06-16 10:28:23 -04:00
parent b73786c6d5
commit 0dc38ff9b3
23 changed files with 203 additions and 19 deletions

View file

@ -67,6 +67,10 @@ following default cluster parameters:
OpenStack (default is unset)
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
Kubernetes
* *authorization_mode* - Set this to "RBAC" (upper-case, no quotes)
[to enable Role Based Access Control](https://kubernetes.io/docs/admin/authorization/rbac/)
* *rotate_kubernetes_certs* - Set this to true to regenerate Kubernetes node certificates. *Warning: this will overwrite existing certs.*
Note, if the cloud provider makes any use of the ``10.233.0.0/16`` range, e.g. for instances'
private addresses, make sure to pick another value for ``kube_service_addresses``
@ -116,3 +120,4 @@ The possible vars are:
Kargo sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their
passwords default to changeme. You can set this by changing ``kube_api_pwd``.

View file

@ -40,3 +40,11 @@ netchecker_server_memory_requests: 64M
# SSL
etcd_cert_dir: "/etc/ssl/etcd/ssl"
canal_cert_dir: "/etc/canal/certs"
# RBAC
rbac_resources:
  - clusterrole
  - clusterrolebinding
  - sa
rbac_enabled: "{{ authorization_mode == 'RBAC' }}"

View file

@ -13,11 +13,15 @@
src: "{{item.file}}"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items:
- {name: kubedns, file: kubedns-sa.yml, type: sa}
- {name: kubedns, file: kubedns-deploy.yml, type: deployment}
- {name: kubedns, file: kubedns-svc.yml, type: svc}
- {name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa}
- {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole}
- {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding}
- {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment}
register: manifests
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and (item.type not in rbac_resources or rbac_enabled)
tags: dnsmasq
- name: Kubernetes Apps | Start Resources
@ -29,6 +33,7 @@
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "{{item.changed | ternary('latest','present') }}"
with_items: "{{ manifests.results }}"
failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
tags: dnsmasq
@ -36,3 +41,4 @@
include: tasks/netchecker.yml
when: deploy_netchecker
tags: netchecker

View file

@ -0,0 +1,32 @@
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: cluster-proportional-autoscaler
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["list"]
  - apiGroups: [""]
    resources: ["replicationcontrollers/scale"]
    verbs: ["get", "update"]
  - apiGroups: ["extensions"]
    resources: ["deployments/scale", "replicasets/scale"]
    verbs: ["get", "update"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "create"]

View file

@ -0,0 +1,27 @@
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: cluster-proportional-autoscaler
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: cluster-proportional-autoscaler
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-proportional-autoscaler
  apiGroup: rbac.authorization.k8s.io

View file

@ -0,0 +1,19 @@
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ServiceAccount
apiVersion: v1
metadata:
  name: cluster-proportional-autoscaler
  namespace: kube-system

View file

@ -46,4 +46,5 @@ spec:
- --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
- --logtostderr=true
- --v=2
serviceAccountName: cluster-proportional-autoscaler
serviceAccount: cluster-proportional-autoscaler

View file

@ -114,3 +114,7 @@ spec:
- containerPort: 8080
protocol: TCP
dnsPolicy: Default # Don't use cluster DNS.
{% if authorization_mode is defined and authorization_mode == "RBAC" %}
serviceAccount: kube-dns
serviceAccountName: kube-dns
{% endif %}

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns

View file

@ -2,3 +2,5 @@ helm_enabled: false
# specify a dir and attach it to helm for HELM_HOME.
helm_home_dir: "/root/.helm"
rbac_enabled: "{{ authorization_mode == 'RBAC' }}"

View file

@ -10,10 +10,28 @@
mode: 0755
register: helm_container
- name: Helm | Configure tiller service account for RBAC
command: kubectl create serviceaccount tiller --namespace=kube-system
ignore_errors: yes
when: rbac_enabled
- name: Helm | Configure tiller rolebindings for RBAC
command: kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
ignore_errors: yes
when: rbac_enabled
- name: Helm | Install/upgrade helm
command: "{{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }}"
when: helm_container.changed
- name: Helm | Patch tiller deployment for RBAC
shell: >
kubectl --namespace=kube-system get deployment tiller-deploy -o json |
python -c 'import sys,json;a=json.load(sys.stdin);a["spec"]["template"]["spec"]["serviceAccount"]="tiller";json.dump(a,sys.stdout)' |
kubectl apply -f -
when: rbac_enabled
- name: Helm | Set up bash completion
shell: "umask 022 && {{ bin_dir }}/helm completion >/etc/bash_completion.d/helm.sh"
when: ( helm_container.changed and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] )
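For reference, the patch step above round-trips the tiller deployment JSON through a Python one-liner before re-applying it. A hedged alternative sketch using kubectl's strategic merge patch (not part of this change; assumes the deployed kubectl supports the patch subcommand) might look like:

# Hypothetical alternative, not part of this commit:
- name: Helm | Patch tiller deployment for RBAC (kubectl patch sketch)
  command: >
    kubectl --namespace=kube-system patch deployment tiller-deploy
    --patch '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
  when: rbac_enabled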

View file

@ -65,3 +65,5 @@ apiserver_custom_flags: []
controller_mgr_custom_flags: []
scheduler_custom_flags: []
authorization_mode: RBAC

View file

@ -81,6 +81,9 @@ spec:
{% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %}
- --anonymous-auth={{ kube_api_anonymous_auth }}
{% endif %}
{% if authorization_mode %}
- --authorization-mode={{ authorization_mode }}
{% endif %}
{% if apiserver_custom_flags is string %}
- {{ apiserver_custom_flags }}
{% else %}

View file

@ -35,6 +35,9 @@ spec:
- --node-monitor-period={{ kube_controller_node_monitor_period }}
- --pod-eviction-timeout={{ kube_controller_pod_eviction_timeout }}
- --v={{ kube_log_level }}
{% if authorization_mode is defined and authorization_mode == "RBAC" %}
- --use-service-account-credentials
{% endif %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
- --cloud-provider={{cloud_provider}}
- --cloud-config={{ kube_config_dir }}/cloud_config

View file

@ -1,4 +1,9 @@
---
- name: restart kubelet if secrets changed
command: /bin/true
when: secret_changed|d(False)
notify: restart kubelet
- name: restart kubelet
command: /bin/true
notify:

View file

@ -30,9 +30,12 @@
- name: write the kubecfg (auth) file for kubelet
template:
src: node-kubeconfig.yaml.j2
dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
src: "{{ item }}-kubeconfig.yaml.j2"
dest: "{{ kube_config_dir }}/{{ item }}-kubeconfig.yaml"
backup: yes
with_items:
- node
- kube-proxy
notify: restart kubelet
tags: kubelet

View file

@ -4,3 +4,7 @@
args:
creates: "/var/lib/cni"
failed_when: false
- name: "Pre-upgrade | Make sure to restart kubelet if certificates changed"
command: /bin/true
notify: restart kubelet if secrets changed

View file

@ -0,0 +1,18 @@
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.pem
    server: {{ kube_apiserver_endpoint }}
users:
- name: kube-proxy
  user:
    client-certificate: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}.pem
    client-key: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}-key.pem
contexts:
- context:
    cluster: local
    user: kube-proxy
  name: kube-proxy-{{ cluster_name }}
current-context: kube-proxy-{{ cluster_name }}

View file

@ -27,7 +27,7 @@ spec:
- --v={{ kube_log_level }}
- --master={{ kube_apiserver_endpoint }}
{% if not is_kube_master %}
- --kubeconfig={{kube_config_dir}}/node-kubeconfig.yaml
- --kubeconfig={{kube_config_dir}}/kube-proxy-kubeconfig.yaml
{% endif %}
- --bind-address={{ ip | default(ansible_default_ipv4.address) }}
- --cluster-cidr={{ kube_pods_subnet }}
@ -41,7 +41,7 @@ spec:
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
- mountPath: {{kube_config_dir}}/node-kubeconfig.yaml
- mountPath: {{kube_config_dir}}/kube-proxy-kubeconfig.yaml
name: "kubeconfig"
readOnly: true
- mountPath: {{kube_config_dir}}/ssl
@ -60,7 +60,7 @@ spec:
{% endif %}
- name: "kubeconfig"
hostPath:
path: "{{kube_config_dir}}/node-kubeconfig.yaml"
path: "{{kube_config_dir}}/kube-proxy-kubeconfig.yaml"
- name: "etc-kube-ssl"
hostPath:
path: "{{kube_config_dir}}/ssl"

View file

@ -1,2 +1,4 @@
---
kube_cert_group: kube-cert
rotate_kubernetes_certs: false # set this to true to regenerate certificates

View file

@ -80,6 +80,7 @@ if [ ! -e "$SSLDIR/ca-key.pem" ]; then
cat ca.pem >> apiserver.pem
fi
# Admins
if [ -n "$MASTERS" ]; then
for host in $MASTERS; do
cn="${host%%.*}"
@ -90,16 +91,28 @@ if [ -n "$MASTERS" ]; then
done
fi
# Nodes and Admin
# Nodes
if [ -n "$HOSTS" ]; then
for host in $HOSTS; do
cn="${host%%.*}"
# node key
openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1
openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=kube-node-${cn}" > /dev/null 2>&1
openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=kube-node-${cn}/O=system:nodes" > /dev/null 2>&1
openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 3650 > /dev/null 2>&1
done
fi
# system:kube-proxy
if [ -n "$HOSTS" ]; then
for host in $HOSTS; do
cn="${host%%.*}"
# kube-proxy key
openssl genrsa -out kube-proxy-${host}-key.pem 2048 > /dev/null 2>&1
openssl req -new -key kube-proxy-${host}-key.pem -out kube-proxy-${host}.csr -subj "/CN=system:kube-proxy" > /dev/null 2>&1
openssl x509 -req -in kube-proxy-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out kube-proxy-${host}.pem -days 3650 > /dev/null 2>&1
done
fi
# Install certs
mv *.pem ${SSLDIR}/

View file

@ -10,8 +10,8 @@
- name: "Check_certs | Set default value for 'sync_certs', 'gen_certs', and 'secret_changed' to false"
set_fact:
sync_certs: false
gen_certs: false
sync_certs: true
gen_certs: true
secret_changed: false
- name: "Check certs | check if a cert already exists on node"
@ -25,7 +25,7 @@
- name: "Check_certs | Set 'gen_certs' to true"
set_fact:
gen_certs: true
when: "not item in kubecert_master.files|map(attribute='path') | list"
when: "rotate_kubernetes_certs or item not in (kubecert_master.files|map(attribute='path')|list)"
run_once: true
with_items: >-
['{{ kube_cert_dir }}/ca.pem',
@ -41,7 +41,7 @@
{% set existing_certs = kubecert_master.files|map(attribute='path')|list|sort %}
{% for host in groups['k8s-cluster'] -%}
{% set host_cert = "%s/node-%s-key.pem"|format(kube_cert_dir, host) %}
{% if host_cert in existing_certs -%}
{% if host_cert in existing_certs and not rotate_kubernetes_certs -%}
"{{ host }}": False,
{% else -%}
"{{ host }}": True,
@ -62,5 +62,5 @@
(kubecert_node.results[1].stat.checksum|default('') != kubecert_master.files|selectattr("path", "equalto", kubecert_node.results[1].stat.path)|map(attribute="checksum")|first|default('')) -%}
{%- set _ = certs.update({'sync': True}) -%}
{% endif %}
{{ certs.sync }}
{{ rotate_kubernetes_certs or certs.sync }}

View file

@ -72,8 +72,15 @@
{% for node in groups['k8s-cluster'] %}
'node-{{ node }}.pem',
'node-{{ node }}-key.pem',
'kube-proxy-{{ node }}.pem',
'kube-proxy-{{ node }}-key.pem',
{% endfor %}]"
my_node_certs: ['ca.pem', 'node-{{ inventory_hostname }}.pem', 'node-{{ inventory_hostname }}-key.pem']
my_node_certs: ['ca.pem',
'node-{{ inventory_hostname }}.pem',
'node-{{ inventory_hostname }}-key.pem',
'kube-proxy-{{ inventory_hostname }}.pem',
'kube-proxy-{{ inventory_hostname }}-key.pem',
]
tags: facts
- name: Gen_certs | Gather master certs
@ -121,7 +128,6 @@
- name: Gen_certs | Unpack certs on masters
shell: "base64 -d < {{ cert_tempfile.stdout }} | tar xz -C {{ kube_cert_dir }}"
no_log: true
changed_when: false
check_mode: no
when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
inventory_hostname != groups['kube-master'][0]
@ -139,7 +145,6 @@
args:
executable: /bin/bash
no_log: true
changed_when: false
check_mode: no
when: inventory_hostname in groups['kube-node'] and
sync_certs|default(false) and