c12s-kubespray/roles/network_plugin/calico/tasks/main.yml
Bogdan Dobrelya fd83ec6526 Add etcd proxy support
* Enforce an etcd-proxy role on the k8s-cluster group members. This
provides an HA layout for all of the k8s cluster's internal clients.
* Proxies run on each node in the group as separate etcd instances in
readwrite proxy mode, listening on the given endpoint, which is either
access_ip:2379 or localhost:2379.
* The notion behind 'kube_etcd_multiaccess' is: ignore endpoints and
load balancers and use the etcd members' IPs as a comma-separated
list. Otherwise, clients shall use the local endpoint provided by the
etcd-proxy instance on each etcd node. Networking plugins always
use that access mode.
* Fix apiserver's etcd servers args to use the etcd_access_endpoint.
* Fix networking plugins flannel/calico to use the etcd_endpoint.
* Fix the name env var to be set for non-masters as well.
* Fix etcd_client_url, which was not used anywhere, and de-duplicate the
etcd_* facts evaluation that was repeated in a few places.
* Define proxy modes only in the env file, if not a master. Drop the
automatic proxy mode decision for etcd nodes in init/unit scripts.
* Use Wants= instead of Requires=, as "This is the recommended way to
hook start-up of one unit to the start-up of another unit".
* Make apiserver/calico Wants= etcd-proxy to keep it always up (see the
snippet below).
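
For reference, a minimal sketch of the dependency described in the last two
points, assuming the proxy unit is named etcd-proxy.service (exact unit names
come from the templates in this repo):

    [Unit]
    Wants=etcd-proxy.service
    After=etcd-proxy.service

Unlike Requires=, Wants= pulls the proxy in at start-up but does not take the
dependent unit down if the proxy fails.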

Signed-off-by: Bogdan Dobrelya <bdobrelia@mirantis.com>
Co-authored-by: Matthew Mosesohn <mmosesohn@mirantis.com>
2016-07-19 14:09:40 +02:00

---
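# Docker daemon options and the docker.service unit are only templated on
# distros where this role manages Docker itself; CoreOS ships its own unit.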
- name: Calico | Set docker daemon options
  template:
    src: docker
    dest: "/etc/default/docker"
    owner: root
    group: root
    mode: 0644
  notify:
    - restart docker
  when: ansible_os_family != "CoreOS"

- name: Calico | Write docker.service systemd file
  template:
    src: systemd-docker.service
    dest: /lib/systemd/system/docker.service
  notify: restart docker
  when: ansible_service_mgr == "systemd" and ansible_os_family != "CoreOS"

- meta: flush_handlers
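
# Install the calicoctl wrapper script and the CNI plugin binaries from the
# local release dir; rsync -piu only copies a file when the source is newer.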
- name: Calico | Install calicoctl container script
  template:
    src: calicoctl-container.j2
    dest: "{{ bin_dir }}/calicoctl"
    mode: 0755
    owner: root
    group: root
  changed_when: false
  notify: restart calico-node

- name: Calico | Install calico cni bin
  command: rsync -piu "{{ local_release_dir }}/calico/bin/calico" "/opt/cni/bin/calico"
  changed_when: false

- name: Calico | Install calico-ipam cni bin
  command: rsync -piu "{{ local_release_dir }}/calico/bin/calico-ipam" "/opt/cni/bin/calico-ipam"
  changed_when: false

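# The Calico IP pool lives in etcd under /calico/v1/ipam/v4/pool; wait for
# etcd on the masters, then query the key to see whether a pool already exists.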
- name: Calico | wait for etcd
  wait_for:
    port: 2379
  when: inventory_hostname in groups['kube-master']

- name: Calico | Check if calico network pool has already been configured
  uri:
    url: "{{ etcd_endpoint }}/v2/keys/calico/v1/ipam/v4/pool"
    return_content: yes
    status_code: 200,404
  register: calico_conf
  run_once: true

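# If no pool was found (HTTP 404), add one; the variant (plain, --ipip with
# --nat-outgoing for clouds, or --nat-outgoing only) is picked from
# cloud_provider, nat_outgoing and peer_with_router.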
- name: Calico | Configure calico network pool
  command: "{{ bin_dir }}/calicoctl pool add {{ kube_pods_subnet }}"
  run_once: true
  when: calico_conf.status == 404 and cloud_provider is not defined
        and not nat_outgoing|default(false) or
        (nat_outgoing|default(false) and peer_with_router|default(false))

- name: Calico | Configure calico network pool for cloud
  command: "{{ bin_dir }}/calicoctl pool add {{ kube_pods_subnet }} --ipip --nat-outgoing"
  run_once: true
  when: calico_conf.status == 404 and cloud_provider is defined

- name: Calico | Configure calico network pool with nat outgoing
  command: "{{ bin_dir }}/calicoctl pool add {{ kube_pods_subnet }} --nat-outgoing"
  run_once: true
  when: calico_conf.status == 404 and cloud_provider is not defined
        and nat_outgoing|default(false) and not peer_with_router|default(false)

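# Re-read the pool from etcd and fail early if more than one pool is
# configured or if it does not match kube_pods_subnet.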
- name: Calico | Get calico configuration from etcd
  uri:
    url: "{{ etcd_endpoint }}/v2/keys/calico/v1/ipam/v4/pool"
    return_content: yes
  register: calico_pools
  run_once: true

- name: Calico | Check if calico pool is properly configured
  fail:
    msg: 'Only one network pool must be configured and it must be the subnet {{ kube_pods_subnet }}.
          Please erase calico configuration and run the playbook again ("etcdctl rm --recursive /calico/v1/ipam/v4/pool")'
  when: ( calico_pools.json['node']['nodes'] | length > 1 ) or
        ( not calico_pools.json['node']['nodes'][0]['key'] | search(".*{{ kube_pods_subnet | ipaddr('network') }}.*") )
  run_once: true

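# Ship the calico-node service definition matching the host's init system:
# /etc/network-environment plus an init script for sysvinit/upstart, or a
# systemd unit.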
- name: Calico | Write /etc/network-environment
  template: src=network-environment.j2 dest=/etc/network-environment
  when: ansible_service_mgr in ["sysvinit","upstart"]

- name: Calico | Write calico-node systemd init file
  template: src=calico-node.service.j2 dest=/etc/systemd/system/calico-node.service
  when: ansible_service_mgr == "systemd"
  notify: restart calico-node

- name: Calico | Write calico-node initd script
  template: src=deb-calico.initd.j2 dest=/etc/init.d/calico-node owner=root mode=0755
  when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "Debian"
  notify: restart calico-node

- name: Calico | Write calico-node initd script
  template: src=rh-calico.initd.j2 dest=/etc/init.d/calico-node owner=root mode=0755
  when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "RedHat"
  notify: restart calico-node

- meta: flush_handlers

- name: Calico | Enable calico-node
  service:
    name: calico-node
    state: started
    enabled: yes

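# When peering with external routers, turn off the full BGP node-to-node mesh
# on the kube-node hosts and add each router from the 'peers' list instead.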
- name: Calico | Disable node mesh
  shell: "{{ bin_dir }}/calicoctl bgp node-mesh off"
  when: peer_with_router|default(false) and inventory_hostname in groups['kube-node']

- name: Calico | Configure peering with router(s)
  shell: "{{ bin_dir }}/calicoctl node bgp peer add {{ item.router_id }} as {{ item.as }}"
  with_items: peers
  when: peer_with_router|default(false) and inventory_hostname in groups['kube-node']