Refactor roles and hosts

Shorten deployment time by:
- Removing redundant roles when they are duplicated by a dependency, and
  vice versa.
- Always installing docker as a dependency of the etcd role for members of
  the k8s-cluster group, and dropping the docker role from cluster.yml.
- Dropping the etcd and node role dependencies from the master role, as they
  are covered by the etcd and node roles applied to the k8s-cluster group,
  which includes the masters. Copying the defaults for master from the node
  role.
- Decoupling the master, node, and secrets roles' handlers and vars so they
  can be used without cross references.

Signed-off-by: Bogdan Dobrelya <bdobrelia@mirantis.com>
Bogdan Dobrelya 2016-08-25 11:16:14 +02:00
parent c71b078c8e
commit 8168689caa
8 changed files with 27 additions and 19 deletions
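
The refactor leans on Ansible's conditional role dependencies: a role listed
under dependencies in a role's meta/main.yml runs before the role itself, and
a when clause on the entry is evaluated per host, so one dependency can cover
several host groups. A minimal sketch of the pattern (the myservice role name
is illustrative, not a file in this commit):

# roles/myservice/meta/main.yml -- illustrative only
---
dependencies:
  # The docker role runs first, but only on hosts matching the condition
  - role: docker
    when: inventory_hostname in groups['k8s-cluster']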

cluster.yml

@@ -5,19 +5,18 @@
 - hosts: etcd:!k8s-cluster
   roles:
     - { role: kubernetes/preinstall, tags: preinstall }
-    - { role: docker, tags: docker }
     - { role: etcd, tags: etcd }

 - hosts: k8s-cluster
   roles:
     - { role: kubernetes/preinstall, tags: preinstall }
-    - { role: docker, tags: docker }
+    - { role: etcd, tags: etcd }
     - { role: kubernetes/node, tags: node }
     - { role: network_plugin, tags: network }

 - hosts: kube-master
   roles:
     - { role: kubernetes/preinstall, tags: preinstall }
     - { role: kubernetes/master, tags: master }

 - hosts: k8s-cluster

roles/etcd/meta/main.yml

@@ -7,5 +7,4 @@ dependencies:
     file: "{{ downloads.etcd }}"
     when: etcd_deployment_type == "host"
   - role: docker
-    when: (ansible_os_family != "CoreOS" and etcd_deployment_type == "docker")
-  - role: "kubernetes/preinstall"
+    when: (ansible_os_family != "CoreOS" and etcd_deployment_type == "docker" or inventory_hostname in groups['k8s-cluster'])
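
One subtlety in the new when expression: in Jinja2, and binds tighter than or,
so the condition groups as in this explicitly parenthesized restatement (same
logic, only regrouped for clarity):

when: >-
  (ansible_os_family != "CoreOS" and etcd_deployment_type == "docker")
  or inventory_hostname in groups['k8s-cluster']

That is, k8s-cluster members now always pull in docker via the etcd role,
while etcd-only hosts get it only for containerized etcd on non-CoreOS
systems.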

roles/kubernetes/master/defaults/main.yml

@@ -0,0 +1,15 @@
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+# An experimental dev/test only dynamic volumes provisioner,
+# for PetSets. Works for kube>=v1.3 only.
+kube_hostpath_dynamic_provisioner: "false"
+hyperkube_image_repo: "quay.io/coreos/hyperkube"
+hyperkube_image_tag: "{{ kube_version }}_coreos.0"
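
Duplicating these values works because role defaults are role-local and sit at
the lowest variable precedence, so the master role can resolve them without
the node or secrets roles having run first. A hypothetical task illustrating
the kind of reference this enables (not part of this commit):

# illustrative master-role task; kube_cert_dir now resolves from this
# role's own defaults, with no cross-role dependency
- name: Master | ensure cert dir exists
  file:
    path: "{{ kube_cert_dir }}"
    state: directory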

roles/kubernetes/master/handlers/main.yml

@@ -2,8 +2,8 @@
 - name: Master | restart kubelet
   command: /bin/true
   notify:
-    - Kubelet | reload systemd
-    - Kubelet | reload kubelet
+    - Master | reload systemd
+    - Master | reload kubelet

 - name: wait for master static pods
   command: /bin/true
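
The command: /bin/true task is the usual Ansible idiom for chaining handlers:
notifying "Master | restart kubelet" runs a no-op whose own notify list fans
out to the reload handlers. Renaming those from Kubelet | ... to Master | ...
keeps the chain inside the master role's own handler file, which is the
decoupling this commit describes. A sketch of how the notified handlers might
be defined (their bodies are assumptions modeled on common kubelet handlers,
not shown in this diff):

# illustrative handler bodies, assumed rather than taken from this commit
- name: Master | reload systemd
  command: systemctl daemon-reload

- name: Master | reload kubelet
  service:
    name: kubelet
    state: restarted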

roles/kubernetes/master/meta/main.yml

@@ -2,5 +2,3 @@
 dependencies:
   - role: download # For kube_version variable
     file: "{{ downloads.nothing }}"
-  - { role: etcd }
-  - { role: kubernetes/node }

roles/kubernetes/node/defaults/main.yml

@@ -1,12 +1,6 @@
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
 dns_domain: "{{ cluster_name }}"
 # resolv.conf to base dns config
@@ -14,10 +8,6 @@ kube_resolv_conf: "/etc/resolv.conf"
 kube_proxy_mode: iptables

-# An experimental dev/test only dynamic volumes provisioner,
-# for PetSets. Works for kube>=v1.3 only.
-kube_hostpath_dynamic_provisioner: "false"
-
 hyperkube_image_repo: "quay.io/coreos/hyperkube"
 hyperkube_image_tag: "{{ kube_version }}_coreos.0"

roles/kubernetes/secrets/defaults/main.yml

@@ -0,0 +1,8 @@
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"

roles/network_plugin/meta/main.yml

@@ -6,4 +6,3 @@ dependencies:
     when: kube_network_plugin == 'flannel'
   - role: network_plugin/weave
     when: kube_network_plugin == 'weave'
-  - role: docker