Move cluster roles and system namespace to new role

These tasks should run after the admin kubeconfig is written and
before the network plugins are brought up.
Author: Matthew Mosesohn
Date:   2017-10-26 09:10:33 +01:00
Parent: 86fb669fd3
Commit: 01c48285b8

6 changed files with 62 additions and 47 deletions
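For context, the ordering constraint from the commit message ends up roughly as sketched below. This is a reconstruction from the hunks that follow, not a verbatim playbook excerpt; the host group of the first play and the surrounding roles are illustrative, only the relative order of kubernetes/client, kubernetes-apps/cluster_roles, and the network plugin roles is what this commit establishes.

# Sketch only -- reconstructed from the hunks below.
- hosts: kube-master
  roles:
    - { role: kubernetes/master, tags: master }
    - { role: kubernetes/client, tags: client }                      # admin kubeconfig is written here
    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }   # relies on that kubeconfig

- hosts: k8s-cluster
  roles:
    - { role: kubernetes-apps/network_plugin, tags: network }        # runs after the cluster roles exist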

View file

@@ -68,6 +68,8 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes/master, tags: master }
+    - { role: kubernetes/client, tags: client }
+    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
 
 - hosts: k8s-cluster
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"

View file

@@ -47,6 +47,8 @@
     - { role: upgrade/pre-upgrade, tags: pre-upgrade }
     - { role: kubernetes/node, tags: node }
     - { role: kubernetes/master, tags: master }
+    - { role: kubernetes/client, tags: client }
+    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
 
 #Finally handle worker upgrades, based on given batch size

View file

@@ -5,26 +5,9 @@
   register: result
   until: result.status == 200
   retries: 10
-  delay: 6
+  delay: 2
   when: inventory_hostname == groups['kube-master'][0]
 
-- name: Kubernetes Apps | Add ClusterRoleBinding to admit nodes
-  template:
-    src: "node-crb.yml.j2"
-    dest: "{{ kube_config_dir }}/node-crb.yml"
-  register: node_crb_manifest
-  when: rbac_enabled
-
-- name: Apply workaround to allow all nodes with cert O=system:nodes to register
-  kube:
-    name: "system:node"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "clusterrolebinding"
-    filename: "{{ kube_config_dir }}/node-crb.yml"
-  when:
-    - rbac_enabled
-    - node_crb_manifest.changed
-
 - name: Kubernetes Apps | Delete old kubedns resources
   kube:
     name: "kubedns"

View file

@@ -0,0 +1,55 @@
+---
+- name: Kubernetes Apps | Wait for kube-apiserver
+  uri:
+    url: "{{ kube_apiserver_insecure_endpoint }}/healthz"
+  register: result
+  until: result.status == 200
+  retries: 10
+  delay: 6
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: Kubernetes Apps | Add ClusterRoleBinding to admit nodes
+  template:
+    src: "node-crb.yml.j2"
+    dest: "{{ kube_config_dir }}/node-crb.yml"
+  register: node_crb_manifest
+  when: rbac_enabled
+
+- name: Apply workaround to allow all nodes with cert O=system:nodes to register
+  kube:
+    name: "system:node"
+    kubectl: "{{bin_dir}}/kubectl"
+    resource: "clusterrolebinding"
+    filename: "{{ kube_config_dir }}/node-crb.yml"
+  when:
+    - rbac_enabled
+    - node_crb_manifest.changed
+
+# This is not a cluster role, but should be run after kubeconfig is set on master
+- name: Write kube system namespace manifest
+  template:
+    src: namespace.j2
+    dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
+  when: inventory_hostname == groups['kube-master'][0]
+  tags:
+    - apps
+
+- name: Check if kube system namespace exists
+  command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}"
+  register: 'kubesystem'
+  changed_when: False
+  failed_when: False
+  when: inventory_hostname == groups['kube-master'][0]
+  tags:
+    - apps
+
+- name: Create kube system namespace
+  command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml"
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  register: create_system_ns
+  until: create_system_ns.rc == 0
+  changed_when: False
+  when: inventory_hostname == groups['kube-master'][0] and kubesystem.rc != 0
+  tags:
+    - apps
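The "Add ClusterRoleBinding to admit nodes" task above renders node-crb.yml.j2, which is not shown in this diff. As a rough sketch, a binding that lets every kubelet presenting a certificate with O=system:nodes register would look something like the following; the real template's labels and apiVersion may differ.

# Hypothetical rendering of node-crb.yml.j2 -- shape inferred from the task description above.
apiVersion: rbac.authorization.k8s.io/v1beta1   # v1 on newer clusters
kind: ClusterRoleBinding
metadata:
  name: system:node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node          # built-in ClusterRole carrying the node permissions
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: Group
    name: system:nodes       # matches the O= field of the kubelet client certificates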

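Likewise, namespace.j2 referenced by the "Write kube system namespace manifest" task is not part of this diff; it presumably renders a plain Namespace object along these lines.

# Assumed content of namespace.j2 -- a minimal Namespace manifest.
apiVersion: v1
kind: Namespace
metadata:
  name: "{{ system_namespace }}"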
View file

@@ -9,34 +9,6 @@
 
 - meta: flush_handlers
 
-- name: Write kube system namespace manifest
-  template:
-    src: namespace.j2
-    dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  when: inventory_hostname == groups['kube-master'][0]
-  tags:
-    - apps
-
-- name: Check if kube system namespace exists
-  command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}"
-  register: 'kubesystem'
-  changed_when: False
-  failed_when: False
-  when: inventory_hostname == groups['kube-master'][0]
-  tags:
-    - apps
-
-- name: Create kube system namespace
-  command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  register: create_system_ns
-  until: create_system_ns.rc == 0
-  changed_when: False
-  when: inventory_hostname == groups['kube-master'][0] and kubesystem.rc != 0
-  tags:
-    - apps
-
 - name: Write kube-scheduler kubeconfig
   template:
     src: kube-scheduler-kubeconfig.yaml.j2

View file

@@ -67,6 +67,8 @@
     - { role: upgrade/pre-upgrade, tags: pre-upgrade }
     - { role: kubernetes/node, tags: node }
     - { role: kubernetes/master, tags: master }
+    - { role: kubernetes/client, tags: client }
+    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
     - { role: network_plugin, tags: network }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
 