Clean up legacy syntax, spacing, and file names (all to .yml)

Migrate the older inline= syntax to pure YAML syntax for module args, to be consistent with most of the rest of the tasks
Clean up some spacing in various files
Rename some files from .yaml to .yml for consistency
Andrew Greenwood 2017-02-17 16:22:34 -05:00
parent e16ebcad6e
commit ca9ea097df
45 changed files with 291 additions and 109 deletions
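
The pattern applied throughout is converting inline key=value module arguments to YAML dictionary syntax. As a minimal illustrative sketch (a made-up task, not taken from any of the changed files), a task like:

- name: Example | Write a config file
  template: src=example.conf.j2 dest=/etc/example.conf mode=0644

becomes:

- name: Example | Write a config file
  template:
    src: example.conf.j2
    dest: /etc/example.conf
    mode: 0644

Both forms are equivalent to Ansible; the dictionary form reads and diffs more cleanly, which is the point of this change.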

@@ -9,7 +9,8 @@
   pre_tasks:
     - name: check confirmation
-      fail: msg="Reset confirmation failed"
+      fail:
+        msg: "Reset confirmation failed"
       when: reset_confirmation != "yes"
   roles:

@@ -1,6 +1,8 @@
 ---
 - name: User | Create User Group
-  group: name={{user.group|default(user.name)}} system={{user.system|default(omit)}}
+  group:
+    name: "{{user.group|default(user.name)}}"
+    system: "{{user.system|default(omit)}}"
 - name: User | Create User
   user:

@@ -15,4 +15,6 @@
 - name: create ssh bastion conf
   become: false
-  template: src=ssh-bastion.conf dest="{{ playbook_dir }}/ssh-bastion.conf"
+  template:
+    src: ssh-bastion.conf
+    dest: "{{ playbook_dir }}/ssh-bastion.conf"

@@ -1,7 +1,8 @@
 ---
 - name: Check presence of fastestmirror.conf
-  stat: path=/etc/yum/pluginconf.d/fastestmirror.conf
+  stat:
+    path: /etc/yum/pluginconf.d/fastestmirror.conf
   register: fastestmirror
 # fastestmirror plugin actually slows down Ansible deployments

@@ -23,7 +23,9 @@
   tags: facts
 - name: Bootstrap | Copy get-pip.py
-  copy: src=get-pip.py dest=~/get-pip.py
+  copy:
+    src: get-pip.py
+    dest: ~/get-pip.py
   when: (need_pip | failed)
 - name: Bootstrap | Install pip
@@ -31,11 +33,16 @@
   when: (need_pip | failed)
 - name: Bootstrap | Remove get-pip.py
-  file: path=~/get-pip.py state=absent
+  file:
+    path: ~/get-pip.py
+    state: absent
   when: (need_pip | failed)
 - name: Bootstrap | Install pip launcher
-  copy: src=runner dest=/opt/bin/pip mode=0755
+  copy:
+    src: runner
+    dest: /opt/bin/pip
+    mode: 0755
   when: (need_pip | failed)
 - name: Install required python modules

@@ -2,5 +2,8 @@
 # Remove requiretty to make ssh pipelining work
 - name: Remove require tty
-  lineinfile: regexp="^\w+\s+requiretty" dest=/etc/sudoers state=absent
+  lineinfile:
+    regexp: "^\w+\s+requiretty"
+    dest: /etc/sudoers
+    state: absent

@@ -34,7 +34,8 @@
   register: dnsmasq_config
 - name: Stat dnsmasq configuration
-  stat: path=/etc/dnsmasq.d/01-kube-dns.conf
+  stat:
+    path: /etc/dnsmasq.d/01-kube-dns.conf
   register: sym
 - name: Move previous configuration
@@ -49,7 +50,9 @@
     state: link
 - name: Create dnsmasq manifests
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
+  template:
+    src: "{{item.file}}"
+    dest: "{{kube_config_dir}}/{{item.file}}"
   with_items:
     - {file: dnsmasq-ds.yml, type: ds}
     - {file: dnsmasq-svc.yml, type: svc}

@@ -23,7 +23,9 @@
     state: restarted
 - name: Docker | pause while Docker restarts
-  pause: seconds=10 prompt="Waiting for docker restart"
+  pause:
+    seconds: 10
+    prompt: "Waiting for docker restart"
 - name: Docker | wait for docker
   command: "{{ docker_bin_dir }}/docker images"

@@ -51,13 +51,16 @@
   when: system_search_domains.stdout != ""
 - name: check number of nameservers
-  fail: msg="Too many nameservers"
+  fail:
+    msg: "Too many nameservers"
   when: docker_dns_servers|length > 3
 - name: check number of search domains
-  fail: msg="Too many search domains"
+  fail:
+    msg: "Too many search domains"
   when: docker_dns_search_domains|length > 6
 - name: check length of search domains
-  fail: msg="Search domains exceeded limit of 256 characters"
+  fail:
+    msg: "Search domains exceeded limit of 256 characters"
   when: docker_dns_search_domains|join(' ')|length > 256

@@ -1,6 +1,8 @@
 ---
 - name: Create docker service systemd directory if it doesn't exist
-  file: path=/etc/systemd/system/docker.service.d state=directory
+  file:
+    path: /etc/systemd/system/docker.service.d
+    state: directory
 - name: Write docker proxy drop-in
   template:

@@ -5,7 +5,10 @@
   when: "{{ download.enabled|bool and not download.container|bool }}"
 - name: Create dest directories
-  file: path={{local_release_dir}}/{{download.dest|dirname}} state=directory recurse=yes
+  file:
+    path: "{{local_release_dir}}/{{download.dest|dirname}}"
+    state: directory
+    recurse: yes
   when: "{{ download.enabled|bool and not download.container|bool }}"
   tags: bootstrap-os
@@ -44,7 +47,12 @@
   tags: facts
 - name: Create dest directory for saved/loaded container images
-  file: path="{{local_release_dir}}/containers" state=directory recurse=yes mode=0755 owner={{ansible_ssh_user|default(ansible_user_id)}}
+  file:
+    path: "{{local_release_dir}}/containers"
+    state: directory
+    recurse: yes
+    mode: 0755
+    owner: "{{ansible_ssh_user|default(ansible_user_id)}}"
   when: "{{ download.enabled|bool and download.container|bool }}"
   tags: bootstrap-os
@@ -58,7 +66,10 @@
   tags: localhost
 - name: Download | create local directory for saved/loaded container images
-  file: path="{{local_release_dir}}/containers" state=directory recurse=yes
+  file:
+    path: "{{local_release_dir}}/containers"
+    state: directory
+    recurse: yes
   delegate_to: localhost
   become: false
   run_once: true
@@ -105,7 +116,8 @@
   tags: facts
 - name: Stat saved container image
-  stat: path="{{fname}}"
+  stat:
+    path: "{{fname}}"
   register: img
   changed_when: false
   when: "{{ download.enabled|bool and download.container|bool and download_run_once|bool }}"

@@ -15,7 +15,8 @@
   check_mode: no
   when: not download_always_pull|bool
-- set_fact: docker_images="{{docker_images_raw.stdout|regex_replace('\[|\]|\\n]','')|regex_replace('\s',',')}}"
+- set_fact:
+    docker_images: "{{docker_images_raw.stdout|regex_replace('\[|\]|\\n]','')|regex_replace('\s',',')}}"
   when: not download_always_pull|bool
 - set_fact:

@@ -16,7 +16,9 @@
   when: is_etcd_master
 - name: wait for etcd up
-  uri: url="https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health" validate_certs=no
+  uri:
+    url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
+    validate_certs: no
   register: result
   until: result.status is defined and result.status == 200
   retries: 10

@@ -1,11 +1,11 @@
 ---
 - name: Gen_certs | create etcd cert dir
   file:
-    path={{ etcd_cert_dir }}
-    group={{ etcd_cert_group }}
-    state=directory
-    owner=root
-    recurse=yes
+    path: "{{ etcd_cert_dir }}"
+    group: "{{ etcd_cert_group }}"
+    state: directory
+    owner: root
+    recurse: yes
 - name: "Gen_certs | create etcd script dir (on {{groups['etcd'][0]}})"
   file:
@@ -17,11 +17,11 @@
 - name: "Gen_certs | create etcd cert dir (on {{groups['etcd'][0]}})"
   file:
-    path={{ etcd_cert_dir }}
-    group={{ etcd_cert_group }}
-    state=directory
-    owner=root
-    recurse=yes
+    path: "{{ etcd_cert_dir }}"
+    group: "{{ etcd_cert_group }}"
+    state: directory
+    owner: root
+    recurse: yes
   run_once: yes
   delegate_to: "{{groups['etcd'][0]}}"
@@ -123,11 +123,11 @@
 - name: Gen_certs | check certificate permissions
   file:
-    path={{ etcd_cert_dir }}
-    group={{ etcd_cert_group }}
-    state=directory
-    owner=kube
-    recurse=yes
+    path: "{{ etcd_cert_dir }}"
+    group: "{{ etcd_cert_group }}"
+    state: directory
+    owner: kube
+    recurse: yes
 - name: Gen_certs | set permissions on keys
   shell: chmod 0600 {{ etcd_cert_dir}}/*key.pem

@@ -5,6 +5,7 @@
 - include: check_certs.yml
   when: cert_management == "script"
   tags: [etcd-secrets, facts]
+
 - include: gen_certs_script.yml
   when: cert_management == "script"
   tags: etcd-secrets
@@ -12,9 +13,11 @@
 - include: sync_etcd_master_certs.yml
   when: cert_management == "vault" and inventory_hostname in groups.etcd
   tags: etcd-secrets
+
 - include: sync_etcd_node_certs.yml
   when: cert_management == "vault" and inventory_hostname in etcd_node_cert_hosts
   tags: etcd-secrets
+
 - include: gen_certs_vault.yml
   when: cert_management == "vault" and (etcd_master_certs_needed|d() or etcd_node_certs_needed|d())
   tags: etcd-secrets
@@ -22,10 +25,13 @@
 - include: "install_{{ etcd_deployment_type }}.yml"
   when: is_etcd_master
   tags: upgrade
+
 - include: set_cluster_health.yml
   when: is_etcd_master
+
 - include: configure.yml
   when: is_etcd_master
+
 - include: refresh_config.yml
   when: is_etcd_master
@@ -50,5 +56,6 @@
 # state insted of `new`.
 - include: set_cluster_health.yml
   when: is_etcd_master
+
 - include: refresh_config.yml
   when: is_etcd_master

@@ -1,7 +1,9 @@
 ---
 - name: install ELRepo key
-  rpm_key: state=present key='{{ elrepo_key_url }}'
+  rpm_key:
+    state: present
+    key: '{{ elrepo_key_url }}'
 - name: install elrepo repository
   yum:
@@ -9,7 +11,10 @@
     state: present
 - name: upgrade kernel
-  yum: name={{elrepo_kernel_package}} state=present enablerepo=elrepo-kernel
+  yum:
+    name: "{{elrepo_kernel_package}}"
+    state: present
+    enablerepo: elrepo-kernel
   register: upgrade
 - name: change default grub entry

@@ -8,23 +8,33 @@
   shell: nohup bash -c "sleep 5 && shutdown -r now 'Reboot required for updated kernel'" &
 - name: Wait for some seconds
-  pause: seconds=10
+  pause:
+    seconds: 10
 - set_fact:
     is_bastion: "{{ inventory_hostname == 'bastion' }}"
     wait_for_delegate: "localhost"
 - set_fact:
     wait_for_delegate: "{{hostvars['bastion']['ansible_ssh_host']}}"
   when: "{{ 'bastion' in groups['all'] }}"
 - name: wait for bastion to come back
-  wait_for: host={{ ansible_ssh_host }} port=22 delay=10 timeout=300
+  wait_for:
+    host: "{{ ansible_ssh_host }}"
+    port: 22
+    delay: 10
+    timeout: 300
   become: false
   delegate_to: localhost
   when: "is_bastion"
 - name: waiting for server to come back (using bastion if necessary)
-  wait_for: host={{ ansible_ssh_host }} port=22 delay=10 timeout=300
+  wait_for:
+    host: "{{ ansible_ssh_host }}"
+    port: 22
+    delay: 10
+    timeout: 300
   become: false
   delegate_to: "{{ wait_for_delegate }}"
   when: "not is_bastion"

@@ -5,7 +5,9 @@
   tags: facts
 - name: Write calico-policy-controller yaml
-  template: src=calico-policy-controller.yml.j2 dest={{kube_config_dir}}/calico-policy-controller.yml
+  template:
+    src: calico-policy-controller.yml.j2
+    dest: "{{kube_config_dir}}/calico-policy-controller.yml"
   when: inventory_hostname == groups['kube-master'][0]
 - name: Start of Calico policy controller

@@ -1,6 +1,7 @@
 ---
 - name: Kubernetes Apps | Wait for kube-apiserver
-  uri: url=http://localhost:8080/healthz
+  uri:
+    url: http://localhost:8080/healthz
   register: result
   until: result.status == 200
   retries: 10
@@ -8,7 +9,9 @@
   when: inventory_hostname == groups['kube-master'][0]
 - name: Kubernetes Apps | Lay Down KubeDNS Template
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
+  template:
+    src: "{{item.file}}"
+    dest: "{{kube_config_dir}}/{{item.file}}"
   with_items:
     - {file: kubedns-rc.yml, type: rc}
     - {file: kubedns-svc.yml, type: svc}

@@ -1,5 +1,7 @@
 - name: Kubernetes Apps | Lay Down Netchecker Template
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
+  template:
+    src: "{{item.file}}"
+    dest: "{{kube_config_dir}}/{{item.file}}"
   with_items:
     - {file: netchecker-agent-ds.yml, type: ds, name: netchecker-agent}
     - {file: netchecker-agent-hostnet-ds.yml, type: ds, name: netchecker-agent-hostnet}

@@ -1,2 +1,3 @@
 ---
-- debug: msg="No helm charts"
+- debug:
+    msg: "No helm charts"

@@ -22,21 +22,24 @@
     state: restarted
 - name: Master | wait for kube-scheduler
-  uri: url=http://localhost:10251/healthz
+  uri:
+    url: http://localhost:10251/healthz
   register: scheduler_result
   until: scheduler_result.status == 200
   retries: 15
   delay: 5
 - name: Master | wait for kube-controller-manager
-  uri: url=http://localhost:10252/healthz
+  uri:
+    url: http://localhost:10252/healthz
   register: controller_manager_result
   until: controller_manager_result.status == 200
   retries: 15
   delay: 5
 - name: Master | wait for the apiserver to be running
-  uri: url=http://localhost:8080/healthz
+  uri:
+    url: http://localhost:8080/healthz
   register: result
   until: result.status == 200
   retries: 10

@@ -36,7 +36,9 @@
 - meta: flush_handlers
 - name: copy kube system namespace manifest
-  copy: src=namespace.yml dest={{kube_config_dir}}/{{system_namespace}}-ns.yml
+  copy:
+    src: namespace.yml
+    dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
   run_once: yes
   when: inventory_hostname == groups['kube-master'][0]
   tags: apps

@@ -43,7 +43,8 @@
   when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists
 - name: "Pre-upgrade | Pause while waiting for kubelet to delete kube-apiserver pod"
-  pause: seconds=20
+  pause:
+    seconds: 20
   when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists
   tags: kube-apiserver

@@ -12,12 +12,18 @@
   tags: nginx
 - name: Write kubelet config file
-  template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet.env backup=yes
+  template:
+    src: kubelet.j2
+    dest: "{{ kube_config_dir }}/kubelet.env"
+    backup: yes
   notify: restart kubelet
   tags: kubelet
 - name: write the kubecfg (auth) file for kubelet
-  template: src=node-kubeconfig.yaml.j2 dest={{ kube_config_dir }}/node-kubeconfig.yaml backup=yes
+  template:
+    src: node-kubeconfig.yaml.j2
+    dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
+    backup: yes
   notify: restart kubelet
   tags: kubelet

@@ -1,9 +1,20 @@
 ---
 - name: nginx-proxy | Write static pod
-  template: src=manifests/nginx-proxy.manifest.j2 dest={{kube_manifest_dir}}/nginx-proxy.yml
+  template:
+    src: manifests/nginx-proxy.manifest.j2
+    dest: "{{kube_manifest_dir}}/nginx-proxy.yml"
 - name: nginx-proxy | Make nginx directory
-  file: path=/etc/nginx state=directory mode=0700 owner=root
+  file:
+    path: /etc/nginx
+    state: directory
+    mode: 0700
+    owner: root
 - name: nginx-proxy | Write nginx-proxy configuration
-  template: src=nginx.conf.j2 dest="/etc/nginx/nginx.conf" owner=root mode=0755 backup=yes
+  template:
+    src: nginx.conf.j2
+    dest: "/etc/nginx/nginx.conf"
+    owner: root
+    mode: 0755
+    backup: yes

@@ -14,7 +14,9 @@
   notify: Preinstall | restart network
 - name: Remove kargo specific dhclient hook
-  file: path="{{ dhclienthookfile }}" state=absent
+  file:
+    path: "{{ dhclienthookfile }}"
+    state: absent
   when: dhclienthookfile is defined
   notify: Preinstall | restart network

@@ -3,7 +3,9 @@
 # Running growpart seems to be only required on Azure, as other Cloud Providers do this at boot time
 - name: install growpart
-  package: name=cloud-utils-growpart state=latest
+  package:
+    name: cloud-utils-growpart
+    state: latest
 - name: check if growpart needs to be run
   command: growpart -N /dev/sda 1

@@ -88,12 +88,18 @@
   tags: [network, calico, weave, canal, bootstrap-os]
 - name: Update package management cache (YUM)
-  yum: update_cache=yes name='*'
+  yum:
+    update_cache: yes
+    name: '*'
   when: ansible_pkg_mgr == 'yum'
   tags: bootstrap-os
 - name: Install latest version of python-apt for Debian distribs
-  apt: name=python-apt state=latest update_cache=yes cache_valid_time=3600
+  apt:
+    name: python-apt
+    state: latest
+    update_cache: yes
+    cache_valid_time: 3600
   when: ansible_os_family == "Debian"
   tags: bootstrap-os
@@ -126,7 +132,9 @@
 # Todo : selinux configuration
 - name: Set selinux policy to permissive
-  selinux: policy=targeted state=permissive
+  selinux:
+    policy: targeted
+    state: permissive
   when: ansible_os_family == "RedHat"
   changed_when: False
   tags: bootstrap-os
@@ -146,7 +154,8 @@
   tags: bootstrap-os
 - name: Stat sysctl file configuration
-  stat: path={{sysctl_file_path}}
+  stat:
+    path: "{{sysctl_file_path}}"
   register: sysctl_file_stat
   tags: bootstrap-os
@@ -198,7 +207,8 @@
   tags: [bootstrap-os, resolvconf]
 - name: Check if we are running inside a Azure VM
-  stat: path=/var/lib/waagent/
+  stat:
+    path: /var/lib/waagent/
   register: azure_check
   tags: bootstrap-os

@@ -1,12 +1,23 @@
 ---
-- set_fact: kube_apiserver_count="{{ groups['kube-master'] | length }}"
-- set_fact: kube_apiserver_address="{{ ip | default(ansible_default_ipv4['address']) }}"
-- set_fact: kube_apiserver_access_address="{{ access_ip | default(kube_apiserver_address) }}"
-- set_fact: is_kube_master="{{ inventory_hostname in groups['kube-master'] }}"
-- set_fact: first_kube_master="{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
+- set_fact:
+    kube_apiserver_count: "{{ groups['kube-master'] | length }}"
+- set_fact:
+    kube_apiserver_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
+- set_fact:
+    kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
+- set_fact:
+    is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
+- set_fact:
+    first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
 - set_fact:
     loadbalancer_apiserver_localhost: false
   when: loadbalancer_apiserver is defined
 - set_fact:
     kube_apiserver_endpoint: |-
       {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
@@ -21,34 +32,54 @@
       {%- endif -%}
       {%- endif %}
-- set_fact: etcd_address="{{ ip | default(ansible_default_ipv4['address']) }}"
-- set_fact: etcd_access_address="{{ access_ip | default(etcd_address) }}"
-- set_fact: etcd_peer_url="https://{{ etcd_access_address }}:2380"
-- set_fact: etcd_client_url="https://{{ etcd_access_address }}:2379"
-- set_fact: etcd_authority="127.0.0.1:2379"
-- set_fact: etcd_endpoint="https://{{ etcd_authority }}"
+- set_fact:
+    etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
+- set_fact:
+    etcd_access_address: "{{ access_ip | default(etcd_address) }}"
+- set_fact:
+    etcd_peer_url: "https://{{ etcd_access_address }}:2380"
+- set_fact:
+    etcd_client_url: "https://{{ etcd_access_address }}:2379"
+- set_fact:
+    etcd_authority: "127.0.0.1:2379"
+- set_fact:
+    etcd_endpoint: "https://{{ etcd_authority }}"
 - set_fact:
     etcd_access_addresses: |-
       {% for item in groups['etcd'] -%}
       https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}
       {%- endfor %}
-- set_fact: etcd_access_endpoint="{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
+- set_fact:
+    etcd_access_endpoint: "{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
 - set_fact:
     etcd_member_name: |-
       {% for host in groups['etcd'] %}
       {% if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %}
       {% endfor %}
 - set_fact:
     etcd_peer_addresses: |-
       {% for item in groups['etcd'] -%}
       {{ "etcd"+loop.index|string }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
       {%- endfor %}
 - set_fact:
     is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
 - set_fact:
     etcd_after_v3: etcd_version | version_compare("v3.0.0", ">=")
 - set_fact:
     etcd_container_bin_dir: "{% if etcd_after_v3 %}/usr/local/bin/{% else %}/{% endif %}"
 - set_fact:
     peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"

@@ -39,11 +39,13 @@
   when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 - name: target temporary resolvconf cloud init file (Container Linux by CoreOS)
-  set_fact: resolvconffile=/tmp/resolveconf_cloud_init_conf
+  set_fact:
+    resolvconffile: /tmp/resolveconf_cloud_init_conf
   when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 - name: check if /etc/dhclient.conf exists
-  stat: path=/etc/dhclient.conf
+  stat:
+    path: /etc/dhclient.conf
   register: dhclient_stat
 - name: target dhclient conf file for /etc/dhclient.conf
@@ -52,7 +54,8 @@
   when: dhclient_stat.stat.exists
 - name: check if /etc/dhcp/dhclient.conf exists
-  stat: path=/etc/dhcp/dhclient.conf
+  stat:
+    path: /etc/dhcp/dhclient.conf
   register: dhcp_dhclient_stat
 - name: target dhclient conf file for /etc/dhcp/dhclient.conf

@@ -142,10 +142,10 @@
 - name: Gen_certs | check certificate permissions
   file:
-    path={{ kube_cert_dir }}
-    group={{ kube_cert_group }}
-    owner=kube
-    recurse=yes
+    path: "{{ kube_cert_dir }}"
+    group: "{{ kube_cert_group }}"
+    owner: kube
+    recurse: yes
 - name: Gen_certs | set permissions on keys
   shell: chmod 0600 {{ kube_cert_dir}}/*key.pem

@@ -1,29 +1,30 @@
 ---
 - include: check-certs.yml
   tags: [k8s-secrets, facts]
 - include: check-tokens.yml
   tags: [k8s-secrets, facts]
 - name: Make sure the certificate directory exits
   file:
-    path={{ kube_cert_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_cert_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
 - name: Make sure the tokens directory exits
   file:
-    path={{ kube_token_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_token_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
 - name: Make sure the users directory exits
   file:
-    path={{ kube_users_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_users_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
 - name: Populate users for basic auth in API
   lineinfile:
@@ -62,10 +63,10 @@
 - name: "Get_tokens | Make sure the tokens directory exits (on {{groups['kube-master'][0]}})"
   file:
-    path={{ kube_token_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_token_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
   run_once: yes
   delegate_to: "{{groups['kube-master'][0]}}"
   when: gen_tokens|default(false)
@@ -77,9 +78,11 @@
 - include: sync_kube_master_certs.yml
   when: cert_management == "vault" and inventory_hostname in groups['kube-master']
   tags: k8s-secrets
+
 - include: sync_kube_node_certs.yml
   when: cert_management == "vault" and inventory_hostname in groups['k8s-cluster']
   tags: k8s-secrets
+
 - include: gen_certs_vault.yml
   when: cert_management == "vault"
   tags: k8s-secrets

@@ -35,11 +35,15 @@
     group: root
 - name: Calico-rr | Write calico-rr.env for systemd init file
-  template: src=calico-rr.env.j2 dest=/etc/calico/calico-rr.env
+  template:
+    src: calico-rr.env.j2
+    dest: /etc/calico/calico-rr.env
   notify: restart calico-rr
 - name: Calico-rr | Write calico-rr systemd init file
-  template: src=calico-rr.service.j2 dest=/etc/systemd/system/calico-rr.service
+  template:
+    src: calico-rr.service.j2
+    dest: /etc/systemd/system/calico-rr.service
   notify: restart calico-rr
 - name: Calico-rr | Configure route reflector

@@ -60,7 +60,9 @@
   tags: [hyperkube, upgrade]
 - name: Calico | wait for etcd
-  uri: url=https://localhost:2379/health validate_certs=no
+  uri:
+    url: https://localhost:2379/health
+    validate_certs: no
   register: result
   until: result.status == 200 or result.status == 401
   retries: 10
@@ -160,17 +162,23 @@
   when: legacy_calicoctl
 - name: Calico (old) | Write calico-node systemd init file
-  template: src=calico-node.service.legacy.j2 dest=/etc/systemd/system/calico-node.service
+  template:
+    src: calico-node.service.legacy.j2
+    dest: /etc/systemd/system/calico-node.service
   when: legacy_calicoctl
   notify: restart calico-node
 - name: Calico | Write calico.env for systemd init file
-  template: src=calico.env.j2 dest=/etc/calico/calico.env
+  template:
+    src: calico.env.j2
+    dest: /etc/calico/calico.env
   when: not legacy_calicoctl
   notify: restart calico-node
 - name: Calico | Write calico-node systemd init file
-  template: src=calico-node.service.j2 dest=/etc/systemd/system/calico-node.service
+  template:
+    src: calico-node.service.j2
+    dest: /etc/systemd/system/calico-node.service
   when: not legacy_calicoctl
   notify: restart calico-node

@@ -28,7 +28,9 @@
     state: restarted
 - name: Flannel | pause while Docker restarts
-  pause: seconds=10 prompt="Waiting for docker restart"
+  pause:
+    seconds: 10
+    prompt: "Waiting for docker restart"
 - name: Flannel | wait for docker
   command: "{{ docker_bin_dir }}/docker images"

@@ -1,7 +1,9 @@
 ---
 - name: reset | stop services
-  service: name={{ item }} state=stopped
+  service:
+    name: "{{ item }}"
+    state: stopped
   with_items:
     - kubelet
     - etcd
@@ -33,7 +35,9 @@
   shell: "{{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"
 - name: reset | restart docker if needed
-  service: name=docker state=restarted
+  service:
+    name: docker
+    state: restarted
   when: docker_dropins_removed.changed
 - name: reset | gather mounted kubelet dirs
@@ -46,7 +50,9 @@
   with_items: '{{ mounted_dirs.stdout_lines }}'
 - name: reset | delete some files and directories
-  file: path={{ item }} state=absent
+  file:
+    path: "{{ item }}"
+    state: absent
   with_items:
     - "{{kube_config_dir}}"
     - /var/lib/kubelet

@@ -1,6 +1,9 @@
 ---
 - name: Create dest directories
-  file: path={{local_release_dir}}/{{item.dest|dirname}} state=directory recurse=yes
+  file:
+    path: "{{local_release_dir}}/{{item.dest|dirname}}"
+    state: directory
+    recurse: yes
   with_items: '{{downloads}}'
 - name: Download items

@@ -2,8 +2,10 @@
 - include: ../shared/check_vault.yml
   when: inventory_hostname in groups.vault
+
 - include: sync_secrets.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/find_leader.yml
   when: inventory_hostname in groups.vault and vault_cluster_is_initialized|d()
@@ -54,5 +56,6 @@
 - include: role_auth_cert.yml
   when: vault_role_auth_method == "cert"
+
 - include: role_auth_userpass.yml
   when: vault_role_auth_method == "userpass"

@@ -21,5 +21,6 @@
     ca_name: auth-ca
     mount_name: auth-pki
   when: inventory_hostname == groups.vault|first and not vault_auth_ca_cert_needed
+
 - include: create_etcd_role.yml
   when: inventory_hostname in groups.etcd

@@ -6,5 +6,6 @@
     auth_backend_path: userpass
     auth_backend_type: userpass
   when: inventory_hostname == groups.vault|first
+
 - include: create_etcd_role.yml
   when: inventory_hostname in groups.etcd

@@ -2,6 +2,7 @@
 - include: ../shared/check_vault.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/check_etcd.yml
   when: inventory_hostname in groups.vault
@@ -9,18 +10,25 @@
 - include: configure.yml
   when: inventory_hostname in groups.vault
+
 - include: binary.yml
   when: inventory_hostname in groups.vault and vault_deployment_type == "host"
+
 - include: systemd.yml
   when: inventory_hostname in groups.vault
+
 - include: init.yml
   when: inventory_hostname in groups.vault
+
 - include: unseal.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/find_leader.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/pki_mount.yml
   when: inventory_hostname == groups.vault|first
+
 - include: ../shared/config_ca.yml
   vars:
     ca_name: ca
@@ -31,5 +39,6 @@
 - include: role_auth_cert.yml
   when: vault_role_auth_method == "cert"
+
 - include: role_auth_userpass.yml
   when: vault_role_auth_method == "userpass"