Add RBAC support for canal (#1604)

Refactored how rbac_enabled is set
Added RBAC to the ubuntu-canal-ha CI job
Added RBAC for the calico policy controller

Matthew Mosesohn, 2017-09-04 11:29:40 +03:00, committed by GitHub
parent 702ce446df
commit a3e6896a43
18 changed files with 274 additions and 46 deletions
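Editor's note: the rbac_enabled refactor called out above boils down to deriving the flag from the cluster's authorization modes rather than toggling it by hand. A minimal sketch of that fact, assuming it lives in the usual kubespray defaults (the defaults file itself is not among the hunks shown below):

  rbac_enabled: "{{ 'RBAC' in authorization_modes }}"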

View file

@@ -269,9 +269,10 @@ before_script:
   ##User-data to simply turn off coreos upgrades
   STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'

-.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
+.ubuntu_canal_ha_rbac_variables: &ubuntu_canal_ha_rbac_variables
 # stage: deploy-gce-part1
   KUBE_NETWORK_PLUGIN: canal
+  AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
   CLOUD_IMAGE: ubuntu-1604-xenial
   CLOUD_REGION: europe-west1-b
   CLUSTER_MODE: ha
@@ -445,24 +446,24 @@ ubuntu-weave-sep-triggers:
   only: ['triggers']

 # More builds for PRs/merges (manual) and triggers (auto)
-ubuntu-canal-ha:
+ubuntu-canal-ha-rbac:
   stage: deploy-gce-part1
   <<: *job
   <<: *gce
   variables:
     <<: *gce_variables
-    <<: *ubuntu_canal_ha_variables
+    <<: *ubuntu_canal_ha_rbac_variables
   when: manual
   except: ['triggers']
   only: ['master', /^pr-.*$/]

-ubuntu-canal-ha-triggers:
+ubuntu-canal-ha-rbac-triggers:
   stage: deploy-gce-part1
   <<: *job
   <<: *gce
   variables:
     <<: *gce_variables
-    <<: *ubuntu_canal_ha_variables
+    <<: *ubuntu_canal_ha_rbac_variables
   when: on_success
   only: ['triggers']
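Editor's note: the AUTHORIZATION_MODES job variable above only takes effect because the CI script forwards it to Ansible as extra vars, where the dict string becomes the authorization_modes list the playbooks key off. A hedged sketch of that hand-off; the literal script line is not part of this diff:

  script:
    # other flags omitted; the dict string supplies 'authorization_modes'
    - ansible-playbook -i inventory.cfg cluster.yml -e "${AUTHORIZATION_MODES}"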

View file

@@ -25,12 +25,14 @@ spec:
               memory: {{ netchecker_server_memory_requests }}
           ports:
             - containerPort: 8081
-              hostPort: 8081
           args:
             - "-v=5"
            - "-logtostderr"
            - "-kubeproxyinit"
            - "-endpoint=0.0.0.0:8081"
+      tolerations:
+        - effect: NoSchedule
+          operator: Exists
 {% if rbac_enabled %}
       serviceAccountName: netchecker-server
 {% endif %}

View file

@@ -1,20 +1,11 @@
 ---
-- name: Create canal ConfigMap
-  run_once: true
+- name: Canal | Start Resources
   kube:
-    name: "canal-config"
+    name: "{{item.item.name}}"
+    namespace: "{{ system_namespace }}"
     kubectl: "{{bin_dir}}/kubectl"
-    filename: "{{kube_config_dir}}/canal-config.yaml"
-    resource: "configmap"
-    namespace: "{{system_namespace}}"
-
-- name: Start flannel and calico-node
-  run_once: true
-  kube:
-    name: "canal-node"
-    kubectl: "{{bin_dir}}/kubectl"
-    filename: "{{kube_config_dir}}/canal-node.yaml"
-    resource: "ds"
-    namespace: "{{system_namespace}}"
-    state: "{{ item | ternary('latest','present') }}"
-  with_items: "{{ canal_node_manifest.changed }}"
+    resource: "{{item.item.type}}"
+    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    state: "{{item.changed | ternary('latest','present') }}"
+  with_items: "{{ canal_manifests.results }}"
+  when: inventory_hostname == groups['kube-master'][0]

View file

@@ -8,3 +8,8 @@ calico_policy_controller_memory_requests: 64M
 # SSL
 calico_cert_dir: "/etc/calico/certs"
 canal_cert_dir: "/etc/canal/certs"
+
+rbac_resources:
+  - sa
+  - clusterrole
+  - clusterrolebinding

View file

@@ -1,22 +1,49 @@
 ---
-- set_fact:
+- name: Set cert dir
+  set_fact:
     calico_cert_dir: "{{ canal_cert_dir }}"
   when: kube_network_plugin == 'canal'
   tags: [facts, canal]

-- name: Write calico-policy-controller yaml
+- name: Get calico-policy-controller version if running
+  shell: "{{ bin_dir }}/kubectl -n {{ system_namespace }} get rs calico-policy-controller -o=jsonpath='{$.spec.template.spec.containers[:1].image}' | cut -d':' -f2"
+  register: existing_calico_policy_version
+  run_once: true
+  failed_when: false
+
+# FIXME(mattymo): This should not be necessary
+- name: Delete calico-policy-controller if an old one is installed
+  kube:
+    name: calico-policy-controller
+    kubectl: "{{bin_dir}}/kubectl"
+    resource: rs
+    namespace: "{{ system_namespace }}"
+    state: absent
+  run_once: true
+  when:
+    - not "NotFound" in existing_calico_policy_version.stderr
+    - existing_calico_policy_version.stdout | version_compare('v0.7.0', '<')
+
+- name: Create calico-policy-controller manifests
   template:
-    src: calico-policy-controller.yml.j2
-    dest: "{{kube_config_dir}}/calico-policy-controller.yml"
-  when: inventory_hostname == groups['kube-master'][0]
-  tags: canal
+    src: "{{item.file}}.j2"
+    dest: "{{kube_config_dir}}/{{item.file}}"
+  with_items:
+    - {name: calico-policy-controller, file: calico-policy-controller.yml, type: rs}
+    - {name: calico-policy-controller, file: calico-policy-sa.yml, type: sa}
+    - {name: calico-policy-controller, file: calico-policy-cr.yml, type: clusterrole}
+    - {name: calico-policy-controller, file: calico-policy-crb.yml, type: clusterrolebinding}
+  register: calico_policy_manifests
+  when:
+    - rbac_enabled or item.type not in rbac_resources

 - name: Start of Calico policy controller
   kube:
-    name: "calico-policy-controller"
+    name: "{{item.item.name}}"
+    namespace: "{{ system_namespace }}"
     kubectl: "{{bin_dir}}/kubectl"
-    filename: "{{kube_config_dir}}/calico-policy-controller.yml"
-    namespace: "{{system_namespace}}"
-    resource: "rs"
+    resource: "{{item.item.type}}"
+    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    state: "{{item.changed | ternary('latest','present') }}"
+  with_items: "{{ calico_policy_manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0]
-  tags: canal
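Editor's note on the version probe above: the jsonpath expression selects the first container's image and cut keeps everything after the colon, so stdout is just the image tag. Sketched with an illustrative value:

  # existing_calico_policy_version.stdout: "v0.5.4"        (illustrative tag)
  # "v0.5.4" | version_compare('v0.7.0', '<')  ->  True, so the old rs is deleted first
  # when no rs exists, kubectl writes "NotFound" to stderr and the delete is skipped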

View file

@@ -15,15 +15,18 @@ spec:
   template:
     metadata:
       name: calico-policy-controller
-      namespace: {{system_namespace}}
+      namespace: {{ system_namespace }}
       labels:
         kubernetes.io/cluster-service: "true"
         k8s-app: calico-policy
     spec:
       hostNetwork: true
+{% if rbac_enabled %}
+      serviceAccountName: calico-policy-controller
+{% endif %}
       tolerations:
         - effect: NoSchedule
           operator: Exists
       containers:
         - name: calico-policy-controller
           image: {{ calico_policy_image_repo }}:{{ calico_policy_image_tag }}

View file

@@ -0,0 +1,17 @@
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-policy-controller
+  namespace: {{ system_namespace }}
+rules:
+  - apiGroups:
+      - ""
+      - extensions
+    resources:
+      - pods
+      - namespaces
+      - networkpolicies
+    verbs:
+      - watch
+      - list

View file

@@ -0,0 +1,13 @@
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-policy-controller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-policy-controller
+subjects:
+- kind: ServiceAccount
+  name: calico-policy-controller
+  namespace: {{ system_namespace }}

View file

@@ -0,0 +1,8 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: calico-policy-controller
+  namespace: {{ system_namespace }}
+  labels:
+    kubernetes.io/cluster-service: "true"
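Editor's note: together, the ServiceAccount, ClusterRole, and ClusterRoleBinding above grant the policy controller watch/list access to pods, namespaces, and networkpolicies. The grant can be sanity-checked by impersonating the new ServiceAccount with kubectl auth can-i, sketched here as an ad-hoc task (a verification aid, not part of this commit):

  - name: Check calico-policy-controller can list pods
    shell: "{{ bin_dir }}/kubectl auth can-i list pods --as=system:serviceaccount:{{ system_namespace }}:calico-policy-controller"
    register: can_list_pods   # stdout is "yes" once the binding is in place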

View file

@@ -31,3 +31,8 @@ calicoctl_memory_limit: 170M
 calicoctl_cpu_limit: 100m
 calicoctl_memory_requests: 32M
 calicoctl_cpu_requests: 25m
+
+rbac_resources:
+  - sa
+  - clusterrole
+  - clusterrolebinding

View file

@@ -32,16 +32,22 @@
   delegate_to: "{{groups['etcd'][0]}}"
   run_once: true

-- name: Canal | Write canal configmap
+- name: Canal | Create canal node manifests
   template:
-    src: canal-config.yml.j2
-    dest: "{{kube_config_dir}}/canal-config.yaml"
-
-- name: Canal | Write canal node configuration
-  template:
-    src: canal-node.yml.j2
-    dest: "{{kube_config_dir}}/canal-node.yaml"
-  register: canal_node_manifest
+    src: "{{item.file}}.j2"
+    dest: "{{kube_config_dir}}/{{item.file}}"
+  with_items:
+    - {name: canal-config, file: canal-config.yaml, type: cm}
+    - {name: canal-node, file: canal-node.yaml, type: ds}
+    - {name: canal, file: canal-node-sa.yml, type: sa}
+    - {name: calico, file: canal-cr-calico.yml, type: clusterrole}
+    - {name: flannel, file: canal-cr-flannel.yml, type: clusterrole}
+    - {name: canal-calico, file: canal-crb-calico.yml, type: clusterrolebinding}
+    - {name: canal-flannel, file: canal-crb-flannel.yml, type: clusterrolebinding}
+  register: canal_manifests
+  when:
+    - inventory_hostname in groups['kube-master']
+    - rbac_enabled or item.type not in rbac_resources

 - name: Canal | Copy cni plugins from hyperkube
   command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/rsync -ac /opt/cni/bin/ /cnibindir/"

View file

@@ -0,0 +1,80 @@
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico
+  namespace: {{ system_namespace }}
+rules:
+  - apiGroups: [""]
+    resources:
+      - namespaces
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups: [""]
+    resources:
+      - pods/status
+    verbs:
+      - update
+  - apiGroups: [""]
+    resources:
+      - pods
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups: [""]
+    resources:
+      - nodes
+    verbs:
+      - get
+      - list
+      - update
+      - watch
+  - apiGroups: ["extensions"]
+    resources:
+      - thirdpartyresources
+    verbs:
+      - create
+      - get
+      - list
+      - watch
+  - apiGroups: ["extensions"]
+    resources:
+      - networkpolicies
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups: ["projectcalico.org"]
+    resources:
+      - globalbgppeers
+    verbs:
+      - get
+      - list
+  - apiGroups: ["projectcalico.org"]
+    resources:
+      - globalconfigs
+      - globalbgpconfigs
+    verbs:
+      - create
+      - get
+      - list
+      - update
+      - watch
+  - apiGroups: ["projectcalico.org"]
+    resources:
+      - ippools
+    verbs:
+      - create
+      - get
+      - list
+      - update
+      - watch
+  - apiGroups: ["alpha.projectcalico.org"]
+    resources:
+      - systemnetworkpolicies
+    verbs:
+      - get
+      - list

View file

@@ -0,0 +1,26 @@
+---
+# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: flannel
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - nodes/status
+    verbs:
+      - patch

View file

@@ -0,0 +1,14 @@
+---
+# Bind the calico ClusterRole to the canal ServiceAccount.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: canal-calico
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico
+subjects:
+- kind: ServiceAccount
+  name: canal
+  namespace: {{ system_namespace }}

View file

@@ -0,0 +1,14 @@
+---
+# Bind the flannel ClusterRole to the canal ServiceAccount.
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: canal-flannel
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: flannel
+subjects:
+- kind: ServiceAccount
+  name: canal
+  namespace: {{ system_namespace }}

View file

@@ -0,0 +1,9 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: canal
+  namespace: {{ system_namespace }}
+  labels:
+    kubernetes.io/cluster-service: "true"

View file

@@ -19,6 +19,9 @@ spec:
         k8s-app: canal-node
     spec:
       hostNetwork: true
+{% if rbac_enabled %}
+      serviceAccountName: canal
+{% endif %}
       tolerations:
         - effect: NoSchedule
           operator: Exists
@@ -169,6 +172,10 @@ spec:
               configMapKeyRef:
                 name: canal-config
                 key: etcd_keyfile
+          - name: NODENAME
+            valueFrom:
+              fieldRef:
+                fieldPath: spec.nodeName
           securityContext:
             privileged: true
           volumeMounts: