Merge branch 'master' into synthscale

Authored by Matthew Mosesohn on 2017-02-21 22:17:43 +03:00, committed by GitHub.
commit d821448e2f
51 changed files with 317 additions and 122 deletions

@@ -67,12 +67,13 @@ plugins can be deployed for a given single cluster.
 Requirements
 --------------
+* **Ansible v2.2 (or newer) and python-netaddr is installed on the machine
+  that will run Ansible commands**
 * The target servers must have **access to the Internet** in order to pull docker images.
+* The target servers are configured to allow **IPv4 forwarding**.
+* **Your ssh key must be copied** to all the servers part of your inventory.
 * The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
 in order to avoid any issue during deployment you should disable your firewall.
-* The target servers are configured to allow **IPv4 forwarding**.
-* **Copy your ssh keys** to all the servers part of your inventory.
-* **Ansible v2.2 (or newer) and python-netaddr**
 ## Network plugins
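
As an aside (not part of this diff), the IPv4 forwarding requirement can be applied up front with a one-off task; a minimal sketch using Ansible's stock sysctl module, not a task shipped in this repo:

    - name: Ensure IPv4 forwarding is enabled
      become: true
      sysctl:
        name: net.ipv4.ip_forward
        value: 1
        state: present
        reload: yes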

@@ -1,7 +1,7 @@
 Azure
 ===============
-To deploy kubespray on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
+To deploy Kubernetes on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
 All your instances are required to run in a resource group and a routing table has to be attached to the subnet your instances are in.
@@ -49,8 +49,8 @@ This is the AppId from the last command
 - Create the role assignment with:
 `azure role assignment create --spn http://kubernetes -o "Owner" -c /subscriptions/SUBSCRIPTION_ID`
-azure\_aad\_client\_id musst be set to the AppId, azure\_aad\_client\_secret is your choosen secret.
+azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
 ## Provisioning Azure with Resource Group Templates
-You'll find Resource Group Templates and scripts to provision the required infrastructore to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
+You'll find Resource Group Templates and scripts to provision the required infrastructure to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
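
For reference, the finished settings in group_vars/all.yml amount to the following sketch (placeholder values are yours to fill in):

    cloud_provider: 'azure'
    azure_aad_client_id: "<AppId from the command above>"
    azure_aad_client_secret: "<your chosen secret>"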

@@ -24,7 +24,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
 kube_users_dir: "{{ kube_config_dir }}/users"
 ## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.5.1
+kube_version: v1.5.3
 # Where the binaries will be downloaded.
 # Note: ensure that you've enough disk space (about 1G)

@@ -9,7 +9,8 @@
   pre_tasks:
     - name: check confirmation
-      fail: msg="Reset confirmation failed"
+      fail:
+        msg: "Reset confirmation failed"
       when: reset_confirmation != "yes"
   roles:
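
Nearly every hunk below repeats one mechanical change: Ansible's inline key=value shorthand is expanded into YAML dictionary syntax. The two spellings are equivalent at runtime; the dict form is just easier to read and produces cleaner line-based diffs. Schematically, with a made-up task (not one from this repo):

    # before: inline shorthand
    - name: Copy a file
      copy: src=foo.conf dest=/etc/foo.conf mode=0644

    # after: equivalent YAML dict form
    - name: Copy a file
      copy:
        src: foo.conf
        dest: /etc/foo.conf
        mode: 0644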

@@ -1,6 +1,8 @@
 ---
 - name: User | Create User Group
-  group: name={{user.group|default(user.name)}} system={{user.system|default(omit)}}
+  group:
+    name: "{{user.group|default(user.name)}}"
+    system: "{{user.system|default(omit)}}"
 - name: User | Create User
   user:

@@ -15,4 +15,6 @@
 - name: create ssh bastion conf
   become: false
-  template: src=ssh-bastion.conf dest="{{ playbook_dir }}/ssh-bastion.conf"
+  template:
+    src: ssh-bastion.conf
+    dest: "{{ playbook_dir }}/ssh-bastion.conf"

@@ -1,7 +1,8 @@
 ---
 - name: Check presence of fastestmirror.conf
-  stat: path=/etc/yum/pluginconf.d/fastestmirror.conf
+  stat:
+    path: /etc/yum/pluginconf.d/fastestmirror.conf
   register: fastestmirror
 # fastestmirror plugin actually slows down Ansible deployments

@@ -23,7 +23,9 @@
   tags: facts
 - name: Bootstrap | Copy get-pip.py
-  copy: src=get-pip.py dest=~/get-pip.py
+  copy:
+    src: get-pip.py
+    dest: ~/get-pip.py
   when: (need_pip | failed)
 - name: Bootstrap | Install pip
@@ -31,11 +33,16 @@
   when: (need_pip | failed)
 - name: Bootstrap | Remove get-pip.py
-  file: path=~/get-pip.py state=absent
+  file:
+    path: ~/get-pip.py
+    state: absent
   when: (need_pip | failed)
 - name: Bootstrap | Install pip launcher
-  copy: src=runner dest=/opt/bin/pip mode=0755
+  copy:
+    src: runner
+    dest: /opt/bin/pip
+    mode: 0755
   when: (need_pip | failed)
 - name: Install required python modules

@@ -2,5 +2,8 @@
 # Remove requiretty to make ssh pipelining work
 - name: Remove require tty
-  lineinfile: regexp="^\w+\s+requiretty" dest=/etc/sudoers state=absent
+  lineinfile:
+    regexp: '^\w+\s+requiretty'
+    dest: /etc/sudoers
+    state: absent

@@ -34,7 +34,8 @@
   register: dnsmasq_config
 - name: Stat dnsmasq configuration
-  stat: path=/etc/dnsmasq.d/01-kube-dns.conf
+  stat:
+    path: /etc/dnsmasq.d/01-kube-dns.conf
   register: sym
 - name: Move previous configuration
@@ -49,7 +50,9 @@
     state: link
 - name: Create dnsmasq manifests
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
+  template:
+    src: "{{item.file}}"
+    dest: "{{kube_config_dir}}/{{item.file}}"
   with_items:
     - {file: dnsmasq-ds.yml, type: ds}
     - {file: dnsmasq-svc.yml, type: svc}

@@ -23,7 +23,9 @@
     state: restarted
 - name: Docker | pause while Docker restarts
-  pause: seconds=10 prompt="Waiting for docker restart"
+  pause:
+    seconds: 10
+    prompt: "Waiting for docker restart"
 - name: Docker | wait for docker
   command: "{{ docker_bin_dir }}/docker images"

@@ -51,13 +51,16 @@
   when: system_search_domains.stdout != ""
 - name: check number of nameservers
-  fail: msg="Too many nameservers"
+  fail:
+    msg: "Too many nameservers"
   when: docker_dns_servers|length > 3
 - name: check number of search domains
-  fail: msg="Too many search domains"
+  fail:
+    msg: "Too many search domains"
   when: docker_dns_search_domains|length > 6
 - name: check length of search domains
-  fail: msg="Search domains exceeded limit of 256 characters"
+  fail:
+    msg: "Search domains exceeded limit of 256 characters"
   when: docker_dns_search_domains|join(' ')|length > 256

@@ -1,6 +1,8 @@
 ---
 - name: Create docker service systemd directory if it doesn't exist
-  file: path=/etc/systemd/system/docker.service.d state=directory
+  file:
+    path: /etc/systemd/system/docker.service.d
+    state: directory
 - name: Write docker proxy drop-in
   template:

@@ -5,7 +5,10 @@
   when: "{{ download.enabled|bool and not download.container|bool }}"
 - name: Create dest directories
-  file: path={{local_release_dir}}/{{download.dest|dirname}} state=directory recurse=yes
+  file:
+    path: "{{local_release_dir}}/{{download.dest|dirname}}"
+    state: directory
+    recurse: yes
   when: "{{ download.enabled|bool and not download.container|bool }}"
   tags: bootstrap-os
@@ -44,7 +47,12 @@
   tags: facts
 - name: Create dest directory for saved/loaded container images
-  file: path="{{local_release_dir}}/containers" state=directory recurse=yes mode=0755 owner={{ansible_ssh_user|default(ansible_user_id)}}
+  file:
+    path: "{{local_release_dir}}/containers"
+    state: directory
+    recurse: yes
+    mode: 0755
+    owner: "{{ansible_ssh_user|default(ansible_user_id)}}"
   when: "{{ download.enabled|bool and download.container|bool }}"
   tags: bootstrap-os
@@ -58,7 +66,10 @@
   tags: localhost
 - name: Download | create local directory for saved/loaded container images
-  file: path="{{local_release_dir}}/containers" state=directory recurse=yes
+  file:
+    path: "{{local_release_dir}}/containers"
+    state: directory
+    recurse: yes
   delegate_to: localhost
   become: false
   run_once: true
@@ -105,7 +116,8 @@
   tags: facts
 - name: Stat saved container image
-  stat: path="{{fname}}"
+  stat:
+    path: "{{fname}}"
   register: img
   changed_when: false
   when: "{{ download.enabled|bool and download.container|bool and download_run_once|bool }}"

@@ -16,7 +16,8 @@
   check_mode: no
   when: not download_always_pull|bool
-- set_fact: docker_images="{{docker_images_raw.stdout|regex_replace('\[|\]|\\n]','')|regex_replace('\s',',')}}"
+- set_fact:
+    docker_images: "{{docker_images_raw.stdout|regex_replace('\\[|\\]|\\n]','')|regex_replace('\\s',',')}}"
   no_log: true
   when: not download_always_pull|bool

@@ -16,7 +16,9 @@
   when: is_etcd_master
 - name: wait for etcd up
-  uri: url="https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health" validate_certs=no
+  uri:
+    url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
+    validate_certs: no
   register: result
   until: result.status is defined and result.status == 200
   retries: 10

@@ -1,11 +1,11 @@
 ---
 - name: Gen_certs | create etcd cert dir
   file:
-    path={{ etcd_cert_dir }}
-    group={{ etcd_cert_group }}
-    state=directory
-    owner=root
-    recurse=yes
+    path: "{{ etcd_cert_dir }}"
+    group: "{{ etcd_cert_group }}"
+    state: directory
+    owner: root
+    recurse: yes
 - name: "Gen_certs | create etcd script dir (on {{groups['etcd'][0]}})"
   file:
@@ -17,11 +17,11 @@
 - name: "Gen_certs | create etcd cert dir (on {{groups['etcd'][0]}})"
   file:
-    path={{ etcd_cert_dir }}
-    group={{ etcd_cert_group }}
-    state=directory
-    owner=root
-    recurse=yes
+    path: "{{ etcd_cert_dir }}"
+    group: "{{ etcd_cert_group }}"
+    state: directory
+    owner: root
+    recurse: yes
   run_once: yes
   delegate_to: "{{groups['etcd'][0]}}"
@@ -126,11 +126,11 @@
 - name: Gen_certs | check certificate permissions
   file:
-    path={{ etcd_cert_dir }}
-    group={{ etcd_cert_group }}
-    state=directory
-    owner=kube
-    recurse=yes
+    path: "{{ etcd_cert_dir }}"
+    group: "{{ etcd_cert_group }}"
+    state: directory
+    owner: kube
+    recurse: yes
 - name: Gen_certs | set permissions on keys
   shell: chmod 0600 {{ etcd_cert_dir}}/*key.pem

@@ -5,6 +5,7 @@
 - include: check_certs.yml
   when: cert_management == "script"
   tags: [etcd-secrets, facts]
+
 - include: gen_certs_script.yml
   when: cert_management == "script"
   tags: etcd-secrets
@@ -12,9 +13,11 @@
 - include: sync_etcd_master_certs.yml
   when: cert_management == "vault" and inventory_hostname in groups.etcd
   tags: etcd-secrets
+
 - include: sync_etcd_node_certs.yml
   when: cert_management == "vault" and inventory_hostname in etcd_node_cert_hosts
   tags: etcd-secrets
+
 - include: gen_certs_vault.yml
   when: cert_management == "vault" and (etcd_master_certs_needed|d() or etcd_node_certs_needed|d())
   tags: etcd-secrets
@@ -22,10 +25,13 @@
 - include: "install_{{ etcd_deployment_type }}.yml"
   when: is_etcd_master
   tags: upgrade
+
 - include: set_cluster_health.yml
   when: is_etcd_master
+
 - include: configure.yml
   when: is_etcd_master
+
 - include: refresh_config.yml
   when: is_etcd_master
@@ -50,5 +56,6 @@
 # state insted of `new`.
 - include: set_cluster_health.yml
   when: is_etcd_master
+
 - include: refresh_config.yml
   when: is_etcd_master

@@ -34,6 +34,11 @@
   command: "{{ docker_bin_dir }}/docker rm -f {{item}}"
   with_items: "{{etcd_proxy_container.stdout_lines}}"
+- name: "Pre-upgrade | see if etcdctl is installed"
+  stat:
+    path: "{{ bin_dir }}/etcdctl"
+  register: etcdctl_installed
+
 - name: "Pre-upgrade | check if member list is non-SSL"
   command: "{{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses | regex_replace('https','http') }} member list"
   register: etcd_member_list
@@ -41,6 +46,7 @@
   delay: 3
   until: etcd_member_list.rc != 2
   run_once: true
+  when: etcdctl_installed.stat.exists
   failed_when: false
 - name: "Pre-upgrade | change peer names to SSL"
@@ -48,4 +54,4 @@
     {{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses | regex_replace('https','http') }} member list |
     awk -F"[: =]" '{print "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses | regex_replace('https','http') }} member update "$1" https:"$7":"$8}' | bash
   run_once: true
-  when: 'etcd_member_list.rc == 0 and "http://" in etcd_member_list.stdout'
+  when: 'etcdctl_installed.stat.exists and etcd_member_list.rc == 0 and "http://" in etcd_member_list.stdout'
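
The new stat task guards first-time deployments: {{ bin_dir }}/etcdctl only exists once etcd has been installed, so the member-list probe and the peer-rename step above are now skipped via etcdctl_installed.stat.exists instead of failing on a fresh cluster.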

@@ -1,7 +1,9 @@
 ---
 - name: install ELRepo key
-  rpm_key: state=present key='{{ elrepo_key_url }}'
+  rpm_key:
+    state: present
+    key: '{{ elrepo_key_url }}'
 - name: install elrepo repository
   yum:
@@ -9,7 +11,10 @@
     state: present
 - name: upgrade kernel
-  yum: name={{elrepo_kernel_package}} state=present enablerepo=elrepo-kernel
+  yum:
+    name: "{{elrepo_kernel_package}}"
+    state: present
+    enablerepo: elrepo-kernel
   register: upgrade
 - name: change default grub entry

@@ -8,23 +8,33 @@
   shell: nohup bash -c "sleep 5 && shutdown -r now 'Reboot required for updated kernel'" &
 - name: Wait for some seconds
-  pause: seconds=10
+  pause:
+    seconds: 10
 - set_fact:
     is_bastion: "{{ inventory_hostname == 'bastion' }}"
     wait_for_delegate: "localhost"
 - set_fact:
     wait_for_delegate: "{{hostvars['bastion']['ansible_ssh_host']}}"
   when: "{{ 'bastion' in groups['all'] }}"
 - name: wait for bastion to come back
-  wait_for: host={{ ansible_ssh_host }} port=22 delay=10 timeout=300
+  wait_for:
+    host: "{{ ansible_ssh_host }}"
+    port: 22
+    delay: 10
+    timeout: 300
   become: false
   delegate_to: localhost
   when: "is_bastion"
 - name: waiting for server to come back (using bastion if necessary)
-  wait_for: host={{ ansible_ssh_host }} port=22 delay=10 timeout=300
+  wait_for:
+    host: "{{ ansible_ssh_host }}"
+    port: 22
+    delay: 10
+    timeout: 300
   become: false
   delegate_to: "{{ wait_for_delegate }}"
   when: "not is_bastion"

@@ -5,7 +5,9 @@
   tags: facts
 - name: Write calico-policy-controller yaml
-  template: src=calico-policy-controller.yml.j2 dest={{kube_config_dir}}/calico-policy-controller.yml
+  template:
+    src: calico-policy-controller.yml.j2
+    dest: "{{kube_config_dir}}/calico-policy-controller.yml"
   when: inventory_hostname == groups['kube-master'][0]
 - name: Start of Calico policy controller

@@ -1,6 +1,7 @@
 ---
 - name: Kubernetes Apps | Wait for kube-apiserver
-  uri: url=http://localhost:8080/healthz
+  uri:
+    url: http://localhost:8080/healthz
   register: result
   until: result.status == 200
   retries: 10
@@ -8,7 +9,9 @@
   when: inventory_hostname == groups['kube-master'][0]
 - name: Kubernetes Apps | Lay Down KubeDNS Template
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
+  template:
+    src: "{{item.file}}"
+    dest: "{{kube_config_dir}}/{{item.file}}"
   with_items:
     - {file: kubedns-rc.yml, type: rc}
     - {file: kubedns-svc.yml, type: svc}

@@ -1,5 +1,7 @@
 - name: Kubernetes Apps | Lay Down Netchecker Template
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
+  template:
+    src: "{{item.file}}"
+    dest: "{{kube_config_dir}}/{{item.file}}"
   with_items:
     - {file: netchecker-agent-ds.yml, type: ds, name: netchecker-agent}
     - {file: netchecker-agent-hostnet-ds.yml, type: ds, name: netchecker-agent-hostnet}

@@ -1,2 +1,3 @@
 ---
-- debug: msg="No helm charts"
+- debug:
+    msg: "No helm charts"

@@ -22,21 +22,24 @@
     state: restarted
 - name: Master | wait for kube-scheduler
-  uri: url=http://localhost:10251/healthz
+  uri:
+    url: http://localhost:10251/healthz
   register: scheduler_result
   until: scheduler_result.status == 200
   retries: 15
   delay: 5
 - name: Master | wait for kube-controller-manager
-  uri: url=http://localhost:10252/healthz
+  uri:
+    url: http://localhost:10252/healthz
   register: controller_manager_result
   until: controller_manager_result.status == 200
   retries: 15
   delay: 5
 - name: Master | wait for the apiserver to be running
-  uri: url=http://localhost:8080/healthz
+  uri:
+    url: http://localhost:8080/healthz
   register: result
   until: result.status == 200
   retries: 10
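
With the settings above, each healthz endpoint is polled until it returns HTTP 200; the scheduler and controller-manager checks allow up to 15 retries x 5 s delay, i.e. roughly 75 seconds, before the play fails.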

@@ -36,7 +36,9 @@
 - meta: flush_handlers
 - name: copy kube system namespace manifest
-  copy: src=namespace.yml dest={{kube_config_dir}}/{{system_namespace}}-ns.yml
+  copy:
+    src: namespace.yml
+    dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
   run_once: yes
   when: inventory_hostname == groups['kube-master'][0]
   tags: apps

@@ -43,7 +43,8 @@
   when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists
 - name: "Pre-upgrade | Pause while waiting for kubelet to delete kube-apiserver pod"
-  pause: seconds=20
+  pause:
+    seconds: 20
   when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists
   tags: kube-apiserver

@@ -12,12 +12,18 @@
   tags: nginx
 - name: Write kubelet config file
-  template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet.env backup=yes
+  template:
+    src: kubelet.j2
+    dest: "{{ kube_config_dir }}/kubelet.env"
+    backup: yes
   notify: restart kubelet
   tags: kubelet
 - name: write the kubecfg (auth) file for kubelet
-  template: src=node-kubeconfig.yaml.j2 dest={{ kube_config_dir }}/node-kubeconfig.yaml backup=yes
+  template:
+    src: node-kubeconfig.yaml.j2
+    dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
+    backup: yes
   notify: restart kubelet
   tags: kubelet

@@ -1,9 +1,20 @@
 ---
 - name: nginx-proxy | Write static pod
-  template: src=manifests/nginx-proxy.manifest.j2 dest={{kube_manifest_dir}}/nginx-proxy.yml
+  template:
+    src: manifests/nginx-proxy.manifest.j2
+    dest: "{{kube_manifest_dir}}/nginx-proxy.yml"
 - name: nginx-proxy | Make nginx directory
-  file: path=/etc/nginx state=directory mode=0700 owner=root
+  file:
+    path: /etc/nginx
+    state: directory
+    mode: 0700
+    owner: root
 - name: nginx-proxy | Write nginx-proxy configuration
-  template: src=nginx.conf.j2 dest="/etc/nginx/nginx.conf" owner=root mode=0755 backup=yes
+  template:
+    src: nginx.conf.j2
+    dest: "/etc/nginx/nginx.conf"
+    owner: root
+    mode: 0755
+    backup: yes

@@ -14,7 +14,9 @@
   notify: Preinstall | restart network
 - name: Remove kargo specific dhclient hook
-  file: path="{{ dhclienthookfile }}" state=absent
+  file:
+    path: "{{ dhclienthookfile }}"
+    state: absent
   when: dhclienthookfile is defined
   notify: Preinstall | restart network

@@ -3,7 +3,9 @@
 # Running growpart seems to be only required on Azure, as other Cloud Providers do this at boot time
 - name: install growpart
-  package: name=cloud-utils-growpart state=latest
+  package:
+    name: cloud-utils-growpart
+    state: latest
 - name: check if growpart needs to be run
   command: growpart -N /dev/sda 1

@@ -88,12 +88,18 @@
   tags: [network, calico, weave, canal, bootstrap-os]
 - name: Update package management cache (YUM)
-  yum: update_cache=yes name='*'
+  yum:
+    update_cache: yes
+    name: '*'
   when: ansible_pkg_mgr == 'yum'
   tags: bootstrap-os
 - name: Install latest version of python-apt for Debian distribs
-  apt: name=python-apt state=latest update_cache=yes cache_valid_time=3600
+  apt:
+    name: python-apt
+    state: latest
+    update_cache: yes
+    cache_valid_time: 3600
   when: ansible_os_family == "Debian"
   tags: bootstrap-os
@@ -125,9 +131,17 @@
   tags: bootstrap-os
 # Todo : selinux configuration
-- name: Set selinux policy to permissive
-  selinux: policy=targeted state=permissive
+- name: Confirm selinux deployed
+  stat:
+    path: /etc/selinux/config
   when: ansible_os_family == "RedHat"
+  register: slc
+
+- name: Set selinux policy to permissive
+  selinux:
+    policy: targeted
+    state: permissive
+  when: ansible_os_family == "RedHat" and slc.stat.exists == True
   changed_when: False
   tags: bootstrap-os
@@ -146,7 +160,8 @@
   tags: bootstrap-os
 - name: Stat sysctl file configuration
-  stat: path={{sysctl_file_path}}
+  stat:
+    path: "{{sysctl_file_path}}"
   register: sysctl_file_stat
   tags: bootstrap-os
@@ -198,7 +213,8 @@
   tags: [bootstrap-os, resolvconf]
 - name: Check if we are running inside a Azure VM
-  stat: path=/var/lib/waagent/
+  stat:
+    path: /var/lib/waagent/
   register: azure_check
   tags: bootstrap-os
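
The selinux change follows the same guard pattern as the etcdctl check earlier: stat /etc/selinux/config first, and only run the selinux module when the file exists (slc.stat.exists), since on RedHat-family hosts without SELinux installed the module would otherwise fail.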

@@ -1,12 +1,23 @@
 ---
-- set_fact: kube_apiserver_count="{{ groups['kube-master'] | length }}"
-- set_fact: kube_apiserver_address="{{ ip | default(ansible_default_ipv4['address']) }}"
-- set_fact: kube_apiserver_access_address="{{ access_ip | default(kube_apiserver_address) }}"
-- set_fact: is_kube_master="{{ inventory_hostname in groups['kube-master'] }}"
-- set_fact: first_kube_master="{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
+- set_fact:
+    kube_apiserver_count: "{{ groups['kube-master'] | length }}"
+
+- set_fact:
+    kube_apiserver_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
+
+- set_fact:
+    kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
+
+- set_fact:
+    is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
+
+- set_fact:
+    first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
+
 - set_fact:
     loadbalancer_apiserver_localhost: false
   when: loadbalancer_apiserver is defined
+
 - set_fact:
     kube_apiserver_endpoint: |-
       {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
@@ -21,34 +32,54 @@
       {%- endif -%}
       {%- endif %}
-- set_fact: etcd_address="{{ ip | default(ansible_default_ipv4['address']) }}"
-- set_fact: etcd_access_address="{{ access_ip | default(etcd_address) }}"
-- set_fact: etcd_peer_url="https://{{ etcd_access_address }}:2380"
-- set_fact: etcd_client_url="https://{{ etcd_access_address }}:2379"
-- set_fact: etcd_authority="127.0.0.1:2379"
-- set_fact: etcd_endpoint="https://{{ etcd_authority }}"
+- set_fact:
+    etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
+
+- set_fact:
+    etcd_access_address: "{{ access_ip | default(etcd_address) }}"
+
+- set_fact:
+    etcd_peer_url: "https://{{ etcd_access_address }}:2380"
+
+- set_fact:
+    etcd_client_url: "https://{{ etcd_access_address }}:2379"
+
+- set_fact:
+    etcd_authority: "127.0.0.1:2379"
+
+- set_fact:
+    etcd_endpoint: "https://{{ etcd_authority }}"
+
 - set_fact:
     etcd_access_addresses: |-
       {% for item in groups['etcd'] -%}
         https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}
       {%- endfor %}
-- set_fact: etcd_access_endpoint="{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
+
+- set_fact:
+    etcd_access_endpoint: "{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
+
 - set_fact:
     etcd_member_name: |-
       {% for host in groups['etcd'] %}
       {% if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %}
       {% endfor %}
+
 - set_fact:
     etcd_peer_addresses: |-
       {% for item in groups['etcd'] -%}
         {{ "etcd"+loop.index|string }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
       {%- endfor %}
+
 - set_fact:
     is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
+
 - set_fact:
     etcd_after_v3: etcd_version | version_compare("v3.0.0", ">=")
+
 - set_fact:
     etcd_container_bin_dir: "{% if etcd_after_v3 %}/usr/local/bin/{% else %}/{% endif %}"
+
 - set_fact:
     peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"
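
These remain separate set_fact tasks rather than one task with many keys because later facts build on earlier ones (kube_apiserver_access_address defaults to kube_apiserver_address, etcd_endpoint embeds etcd_authority, and so on), and keys inside a single set_fact cannot reference their sibling keys.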

@@ -39,11 +39,13 @@
   when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 - name: target temporary resolvconf cloud init file (Container Linux by CoreOS)
-  set_fact: resolvconffile=/tmp/resolveconf_cloud_init_conf
+  set_fact:
+    resolvconffile: /tmp/resolveconf_cloud_init_conf
   when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 - name: check if /etc/dhclient.conf exists
-  stat: path=/etc/dhclient.conf
+  stat:
+    path: /etc/dhclient.conf
   register: dhclient_stat
 - name: target dhclient conf file for /etc/dhclient.conf
@@ -52,7 +54,8 @@
   when: dhclient_stat.stat.exists
 - name: check if /etc/dhcp/dhclient.conf exists
-  stat: path=/etc/dhcp/dhclient.conf
+  stat:
+    path: /etc/dhcp/dhclient.conf
   register: dhcp_dhclient_stat
 - name: target dhclient conf file for /etc/dhcp/dhclient.conf

@@ -146,10 +146,10 @@
 - name: Gen_certs | check certificate permissions
   file:
-    path={{ kube_cert_dir }}
-    group={{ kube_cert_group }}
-    owner=kube
-    recurse=yes
+    path: "{{ kube_cert_dir }}"
+    group: "{{ kube_cert_group }}"
+    owner: kube
+    recurse: yes
 - name: Gen_certs | set permissions on keys
   shell: chmod 0600 {{ kube_cert_dir}}/*key.pem

@@ -1,29 +1,30 @@
 ---
 - include: check-certs.yml
   tags: [k8s-secrets, facts]
+
 - include: check-tokens.yml
   tags: [k8s-secrets, facts]
 - name: Make sure the certificate directory exits
   file:
-    path={{ kube_cert_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_cert_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
 - name: Make sure the tokens directory exits
   file:
-    path={{ kube_token_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_token_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
 - name: Make sure the users directory exits
   file:
-    path={{ kube_users_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_users_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
 - name: Populate users for basic auth in API
   lineinfile:
@@ -62,10 +63,10 @@
 - name: "Get_tokens | Make sure the tokens directory exits (on {{groups['kube-master'][0]}})"
   file:
-    path={{ kube_token_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_token_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
   run_once: yes
   delegate_to: "{{groups['kube-master'][0]}}"
   when: gen_tokens|default(false)
@@ -77,9 +78,11 @@
 - include: sync_kube_master_certs.yml
   when: cert_management == "vault" and inventory_hostname in groups['kube-master']
   tags: k8s-secrets
+
 - include: sync_kube_node_certs.yml
   when: cert_management == "vault" and inventory_hostname in groups['k8s-cluster']
   tags: k8s-secrets
+
 - include: gen_certs_vault.yml
   when: cert_management == "vault"
   tags: k8s-secrets

@@ -16,7 +16,7 @@ DNS.5 = localhost
 DNS.{{ 5 + loop.index }} = {{ host }}
 {% endfor %}
 {% if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %}
-{% set idx = groups['kube-master'] | length | int + 5 %}
+{% set idx = groups['kube-master'] | length | int + 5 + 1 %}
 DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
 {% endif %}
 {% for host in groups['kube-master'] %}
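
The +1 matters because loop.index is 1-based: with N masters the loop above fills DNS.6 through DNS.(5+N), so the old index of N+5 made the load balancer SAN collide with the last master's entry (with 3 masters, both landed on DNS.8). Adding 1 points it at the first free slot.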

@@ -35,11 +35,15 @@
     group: root
 - name: Calico-rr | Write calico-rr.env for systemd init file
-  template: src=calico-rr.env.j2 dest=/etc/calico/calico-rr.env
+  template:
+    src: calico-rr.env.j2
+    dest: /etc/calico/calico-rr.env
   notify: restart calico-rr
 - name: Calico-rr | Write calico-rr systemd init file
-  template: src=calico-rr.service.j2 dest=/etc/systemd/system/calico-rr.service
+  template:
+    src: calico-rr.service.j2
+    dest: /etc/systemd/system/calico-rr.service
   notify: restart calico-rr
 - name: Calico-rr | Configure route reflector

@@ -60,7 +60,9 @@
   tags: [hyperkube, upgrade]
 - name: Calico | wait for etcd
-  uri: url=https://localhost:2379/health validate_certs=no
+  uri:
+    url: https://localhost:2379/health
+    validate_certs: no
   register: result
   until: result.status == 200 or result.status == 401
   retries: 10
@@ -160,17 +162,23 @@
   when: legacy_calicoctl
 - name: Calico (old) | Write calico-node systemd init file
-  template: src=calico-node.service.legacy.j2 dest=/etc/systemd/system/calico-node.service
+  template:
+    src: calico-node.service.legacy.j2
+    dest: /etc/systemd/system/calico-node.service
   when: legacy_calicoctl
   notify: restart calico-node
 - name: Calico | Write calico.env for systemd init file
-  template: src=calico.env.j2 dest=/etc/calico/calico.env
+  template:
+    src: calico.env.j2
+    dest: /etc/calico/calico.env
   when: not legacy_calicoctl
   notify: restart calico-node
 - name: Calico | Write calico-node systemd init file
-  template: src=calico-node.service.j2 dest=/etc/systemd/system/calico-node.service
+  template:
+    src: calico-node.service.j2
+    dest: /etc/systemd/system/calico-node.service
   when: not legacy_calicoctl
   notify: restart calico-node

@@ -28,7 +28,9 @@
     state: restarted
 - name: Flannel | pause while Docker restarts
-  pause: seconds=10 prompt="Waiting for docker restart"
+  pause:
+    seconds: 10
+    prompt: "Waiting for docker restart"
 - name: Flannel | wait for docker
   command: "{{ docker_bin_dir }}/docker images"

@@ -1,7 +1,9 @@
 ---
 - name: reset | stop services
-  service: name={{ item }} state=stopped
+  service:
+    name: "{{ item }}"
+    state: stopped
   with_items:
     - kubelet
     - etcd
@@ -33,7 +35,9 @@
   shell: "{{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"
 - name: reset | restart docker if needed
-  service: name=docker state=restarted
+  service:
+    name: docker
+    state: restarted
   when: docker_dropins_removed.changed
 - name: reset | gather mounted kubelet dirs
@@ -46,7 +50,9 @@
   with_items: '{{ mounted_dirs.stdout_lines }}'
 - name: reset | delete some files and directories
-  file: path={{ item }} state=absent
+  file:
+    path: "{{ item }}"
+    state: absent
   with_items:
     - "{{kube_config_dir}}"
     - /var/lib/kubelet

@@ -4,7 +4,7 @@ local_release_dir: /tmp
 # Versions
 etcd_version: v3.0.6
 calico_version: v0.23.0
-calico_cni_version: v1.4.2
+calico_cni_version: v1.5.6
 weave_version: v1.8.2
 # Download URL's
@@ -14,8 +14,8 @@ calico_cni_ipam_download_url: "https://github.com/projectcalico/calico-cni/relea
 weave_download_url: "https://github.com/weaveworks/weave/releases/download/{{weave_version}}/weave"
 # Checksums
-calico_cni_checksum: "9cab29764681e9d80da826e4b2cd10841cc01a749e0018867d96dd76a4691548"
-calico_cni_ipam_checksum: "09d076b15b791956efee91646e47fdfdcf382db16082cef4f542a9fff7bae172"
+calico_cni_checksum: "9a6bd6da267c498a1833117777c069f44f720d23226d8459bada2a0b41cb8258"
+calico_cni_ipam_checksum: "8d3574736df1ce10ea88fdec94d84dc58642081d3774d2d48249c6ee94ed316d"
 weave_checksum: "ee22e690985115a5986352b2c75589674349c618a5c95893f87600a13e2d58e9"
 etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"
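
A version bump like this must always be paired with fresh checksums, since the download role verifies every fetched binary against them. A hedged sketch of what such a download-plus-verification looks like with Ansible's get_url (the url and dest values here are assumed for illustration, not copied from the repo):

    - name: Download calico-cni and verify its sha256
      get_url:
        url: "{{ calico_cni_download_url }}"
        dest: "{{ local_release_dir }}/calico/bin/calico"
        checksum: "sha256:{{ calico_cni_checksum }}"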

@@ -1,6 +1,9 @@
 ---
 - name: Create dest directories
-  file: path={{local_release_dir}}/{{item.dest|dirname}} state=directory recurse=yes
+  file:
+    path: "{{local_release_dir}}/{{item.dest|dirname}}"
+    state: directory
+    recurse: yes
   with_items: '{{downloads}}'
 - name: Download items

@@ -2,8 +2,10 @@
 - include: ../shared/check_vault.yml
   when: inventory_hostname in groups.vault
+
 - include: sync_secrets.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/find_leader.yml
   when: inventory_hostname in groups.vault and vault_cluster_is_initialized|d()
@@ -54,5 +56,6 @@
 - include: role_auth_cert.yml
   when: vault_role_auth_method == "cert"
+
 - include: role_auth_userpass.yml
   when: vault_role_auth_method == "userpass"

@@ -21,5 +21,6 @@
     ca_name: auth-ca
     mount_name: auth-pki
   when: inventory_hostname == groups.vault|first and not vault_auth_ca_cert_needed
+
 - include: create_etcd_role.yml
   when: inventory_hostname in groups.etcd

@@ -6,5 +6,6 @@
     auth_backend_path: userpass
     auth_backend_type: userpass
   when: inventory_hostname == groups.vault|first
+
 - include: create_etcd_role.yml
   when: inventory_hostname in groups.etcd

@@ -2,6 +2,7 @@
 - include: ../shared/check_vault.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/check_etcd.yml
   when: inventory_hostname in groups.vault
@@ -9,18 +10,25 @@
 - include: configure.yml
   when: inventory_hostname in groups.vault
+
 - include: binary.yml
   when: inventory_hostname in groups.vault and vault_deployment_type == "host"
+
 - include: systemd.yml
   when: inventory_hostname in groups.vault
+
 - include: init.yml
   when: inventory_hostname in groups.vault
+
 - include: unseal.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/find_leader.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/pki_mount.yml
   when: inventory_hostname == groups.vault|first
+
 - include: ../shared/config_ca.yml
   vars:
     ca_name: ca
@@ -31,5 +39,6 @@
 - include: role_auth_cert.yml
   when: vault_role_auth_method == "cert"
+
 - include: role_auth_userpass.yml
   when: vault_role_auth_method == "userpass"