Always create service account even when rbac_enabled = false

Wong Hoi Sing Edison 2018-08-22 11:41:29 +08:00
parent 7398858572
commit c3b3572025
34 changed files with 3 additions and 78 deletions
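Every hunk below applies the same idea: ServiceAccount manifests (and the RBAC objects that bind them) used to be created only when rbac_enabled was true, and the rendered pod specs referenced them only behind a Jinja2 guard; after this commit they are created and referenced unconditionally. A minimal before/after sketch of the task-side pattern, using a hypothetical foo-serviceaccount.yml manifest rather than any specific file from this diff:

# Before: applying the ServiceAccount manifest was gated on RBAC being enabled.
- name: Foo | Apply ServiceAccount manifest
  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/foo-serviceaccount.yml -n kube-system"
  run_once: true
  when: rbac_enabled

# After: the same task with the `when: rbac_enabled` guard dropped,
# so the ServiceAccount is always created.
- name: Foo | Apply ServiceAccount manifest
  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/foo-serviceaccount.yml -n kube-system"
  run_once: true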


@@ -59,7 +59,6 @@
with_items:
- "dnsmasq-clusterrolebinding.yml"
- "dnsmasq-serviceaccount.yml"
when: rbac_enabled
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: true
@@ -68,7 +67,6 @@
with_items:
- "dnsmasq-clusterrolebinding.yml"
- "dnsmasq-serviceaccount.yml"
when: rbac_enabled
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: true


@@ -31,9 +31,7 @@ spec:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
{% if rbac_enabled %}
serviceAccountName: dnsmasq
{% endif %}
tolerations:
- effect: NoSchedule
operator: Exists
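The template-side counterpart is just as mechanical: the {% if rbac_enabled %} / {% endif %} wrapper around serviceAccountName goes away and the field is rendered unconditionally. For the dnsmasq deployment above, the pod spec fragment now renders roughly as:

    spec:
      # previously wrapped in {% if rbac_enabled %} ... {% endif %};
      # the service account is now always referenced
      serviceAccountName: dnsmasq
      tolerations:
        - effect: NoSchedule
          operator: Exists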


@@ -66,8 +66,3 @@ dashboard_token_ttl: 900
# SSL
etcd_cert_dir: "/etc/ssl/etcd/ssl"
canal_cert_dir: "/etc/canal/certs"
rbac_resources:
- sa
- clusterrole
- clusterrolebinding


@@ -16,7 +16,6 @@
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube-master'][0]
- rbac_enabled or item.type not in rbac_resources
tags:
- coredns
@@ -34,6 +33,5 @@
when:
- dns_mode == 'coredns_dual'
- inventory_hostname == groups['kube-master'][0]
- rbac_enabled or item.type not in rbac_resources
tags:
- coredns
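The rbac_resources helper list that several roles carried in their defaults existed only to feed conditions like the one removed above, so it is deleted alongside them. A sketch of how the pair worked before this commit — the task shape and item names are illustrative, not copied from the diff:

# defaults/main.yml (removed by this commit)
rbac_resources:
  - sa
  - clusterrole
  - clusterrolebinding

# tasks/main.yml -- the last condition is the one this commit drops,
# so sa/clusterrole/clusterrolebinding items are now always laid down
- name: Kubernetes Apps | Lay down CoreDNS manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
  with_items:
    - {name: coredns, file: coredns-sa.yml, type: sa}
    - {name: coredns, file: coredns-clusterrole.yml, type: clusterrole}
    - {name: coredns, file: coredns-clusterrolebinding.yml, type: clusterrolebinding}
  when:
    - dns_mode in ['coredns', 'coredns_dual']
    - inventory_hostname == groups['kube-master'][0]
    - rbac_enabled or item.type not in rbac_resources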


@@ -16,7 +16,6 @@
when:
- dns_mode in ['kubedns','dnsmasq_kubedns']
- inventory_hostname == groups['kube-master'][0]
- rbac_enabled or item.type not in rbac_resources
tags:
- dnsmasq
- kubedns


@@ -35,7 +35,6 @@
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]
- rbac_enabled or item.type not in rbac_resources
- name: Kubernetes Apps | Purge old Netchecker server
kube:


@@ -26,9 +26,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
{% if rbac_enabled %}
serviceAccountName: coredns
{% endif %}
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule


@@ -64,6 +64,4 @@ spec:
- --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
- --logtostderr=true
- --v=2
{% if rbac_enabled %}
serviceAccountName: cluster-proportional-autoscaler
{% endif %}


@@ -172,6 +172,4 @@ spec:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
{% if rbac_enabled %}
serviceAccountName: kube-dns
{% endif %}


@@ -33,6 +33,4 @@ spec:
tolerations:
- effect: NoSchedule
operator: Exists
{% if rbac_enabled %}
serviceAccountName: netchecker-server
{% endif %}


@@ -7,7 +7,6 @@
- "efk-sa.yml"
- "efk-clusterrolebinding.yml"
run_once: true
when: rbac_enabled
- name: "ElasticSearch | Create Serviceaccount and Clusterrolebinding (RBAC)"
command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n kube-system"
@@ -15,7 +14,6 @@
- "efk-sa.yml"
- "efk-clusterrolebinding.yml"
run_once: true
when: rbac_enabled
- name: "ElasticSearch | Write ES deployment"
template:


@@ -52,9 +52,7 @@ spec:
volumes:
- name: es-persistent-storage
emptyDir: {}
{% if rbac_enabled %}
serviceAccountName: efk
{% endif %}
initContainers:
- image: alpine:3.6
command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]


@@ -28,9 +28,7 @@ spec:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
{% if rbac_enabled %}
serviceAccountName: efk
{% endif %}
containers:
- name: fluentd-es
image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}"


@@ -46,7 +46,4 @@ spec:
- containerPort: 5601
name: ui
protocol: TCP
{% if rbac_enabled %}
serviceAccountName: efk
{% endif %}


@@ -19,9 +19,7 @@ spec:
app: cephfs-provisioner
version: {{ cephfs_provisioner_image_tag }}
spec:
{% if rbac_enabled %}
serviceAccount: cephfs-provisioner
{% endif %}
containers:
- name: cephfs-provisioner
image: {{ cephfs_provisioner_image_repo }}:{{ cephfs_provisioner_image_tag }}


@@ -13,7 +13,7 @@
- {name: tiller, file: tiller-sa.yml, type: sa}
- {name: tiller, file: tiller-clusterrolebinding.yml, type: clusterrolebinding}
register: manifests
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
- name: Helm | Apply Helm Manifests (RBAC)
kube:
@@ -24,7 +24,7 @@
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "latest"
with_items: "{{ manifests.results }}"
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
- name: Helm | Install/upgrade helm
command: >
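Helm (together with the Calico kube-controllers task further down) is handled slightly differently: instead of deleting a whole when: line, only the trailing "and rbac_enabled" term is stripped, which appears to account for the commit's three added lines. Before and after, as shown in the hunks above:

# Before
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled

# After -- the rest of the condition is unchanged
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]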


@@ -21,9 +21,7 @@ spec:
prometheus.io/port: '10254'
prometheus.io/scrape: 'true'
spec:
{% if rbac_enabled %}
serviceAccountName: ingress-nginx
{% endif %}
{% if ingress_nginx_host_network %}
hostNetwork: true
{% endif %}


@@ -8,8 +8,3 @@ calico_policy_controller_memory_requests: 64M
# SSL
calico_cert_dir: "/etc/calico/certs"
canal_cert_dir: "/etc/canal/certs"
rbac_resources:
- sa
- clusterrole
- clusterrolebinding


@@ -26,8 +26,7 @@
- {name: calico-kube-controllers, file: calico-kube-cr.yml, type: clusterrole}
- {name: calico-kube-controllers, file: calico-kube-crb.yml, type: clusterrolebinding}
register: calico_kube_manifests
when:
- rbac_enabled or item.type not in rbac_resources
when: inventory_hostname == groups['kube-master'][0] and not item|skipped
- name: Start of Calico kube controllers
kube:


@@ -21,9 +21,7 @@ spec:
k8s-app: calico-kube-controllers
spec:
hostNetwork: true
{% if rbac_enabled %}
serviceAccountName: calico-kube-controllers
{% endif %}
tolerations:
- effect: NoSchedule
operator: Exists


@@ -41,11 +41,6 @@ calico_felix_prometheusprocessmetricsenabled: "true"
# see https://github.com/projectcalico/felix/blob/ab8799eaea66627e5db7717e62fca61fd9c08646/python/calico/felix/config.py#L198
calico_node_ignorelooserpf: false
rbac_resources:
- sa
- clusterrole
- clusterrolebinding
# If you want to use non default IP_AUTODETECTION_METHOD for calico node set this option to one of:
# * can-reach=DESTINATION
# * interface=INTERFACE-REGEX


@@ -191,4 +191,3 @@
register: calico_node_manifests
when:
- inventory_hostname in groups['kube-master']
- rbac_enabled or item.type not in rbac_resources


@@ -22,9 +22,7 @@ spec:
kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
spec:
hostNetwork: true
{% if rbac_enabled %}
serviceAccountName: calico-node
{% endif %}
tolerations:
- effect: NoSchedule
operator: Exists


@@ -31,8 +31,3 @@ calicoctl_memory_limit: 170M
calicoctl_cpu_limit: 100m
calicoctl_memory_requests: 32M
calicoctl_cpu_requests: 25m
rbac_resources:
- sa
- clusterrole
- clusterrolebinding


@@ -53,7 +53,6 @@
register: canal_manifests
when:
- inventory_hostname in groups['kube-master']
- rbac_enabled or item.type not in rbac_resources
- name: Canal | Copy cni plugins from hyperkube
command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -rf /opt/cni/bin/. /cnibindir/"


@@ -19,9 +19,7 @@ spec:
k8s-app: canal-node
spec:
hostNetwork: true
{% if rbac_enabled %}
serviceAccountName: canal
{% endif %}
tolerations:
- effect: NoSchedule
operator: Exists


@@ -18,8 +18,3 @@ cilium_cpu_requests: 100m
# Optional features
cilium_enable_prometheus: false
rbac_resources:
- sa
- clusterrole
- clusterrolebinding


@@ -38,7 +38,6 @@
register: cilium_node_manifests
when:
- inventory_hostname in groups['kube-master']
- rbac_enabled or item.type not in rbac_resources
- name: Cilium | Set CNI directory permissions
file:


@@ -34,9 +34,7 @@ spec:
prometheus.io/port: "9090"
{% endif %}
spec:
{% if rbac_enabled %}
serviceAccountName: cilium
{% endif %}
initContainers:
- name: clean-cilium-state
image: docker.io/library/busybox:1.28.4


@@ -27,9 +27,7 @@ spec:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
{% if rbac_enabled %}
serviceAccountName: contiv-netmaster
{% endif %}
containers:
- name: contiv-api-proxy
image: {{ contiv_auth_proxy_image_repo }}:{{ contiv_auth_proxy_image_tag }}


@@ -27,9 +27,7 @@ spec:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
{% if rbac_enabled %}
serviceAccountName: contiv-netmaster
{% endif %}
containers:
- name: contiv-netmaster
image: {{ contiv_image_repo }}:{{ contiv_image_tag }}


@@ -26,9 +26,7 @@ spec:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
{% if rbac_enabled %}
serviceAccountName: contiv-netplugin
{% endif %}
containers:
# Runs netplugin container on each Kubernetes node. This
# container programs network policy and routes on each


@@ -11,4 +11,3 @@
register: flannel_node_manifests
when:
- inventory_hostname in groups['kube-master']
- rbac_enabled or item.type not in rbac_resources


@@ -52,9 +52,7 @@ spec:
tier: node
k8s-app: flannel
spec:
{% if rbac_enabled %}
serviceAccountName: flannel
{% endif %}
containers:
- name: kube-flannel
image: {{ flannel_image_repo }}:{{ flannel_image_tag }}