diff --git a/reset.yml b/reset.yml
index 9cf504756..42a188ccc 100644
--- a/reset.yml
+++ b/reset.yml
@@ -9,7 +9,8 @@
 
   pre_tasks:
     - name: check confirmation
-      fail: msg="Reset confirmation failed"
+      fail:
+        msg: "Reset confirmation failed"
       when: reset_confirmation != "yes"
 
   roles:
diff --git a/roles/adduser/tasks/main.yml b/roles/adduser/tasks/main.yml
index 394ff9294..43ec8ebbb 100644
--- a/roles/adduser/tasks/main.yml
+++ b/roles/adduser/tasks/main.yml
@@ -1,6 +1,8 @@
 ---
 - name: User | Create User Group
-  group: name={{user.group|default(user.name)}} system={{user.system|default(omit)}}
+  group:
+    name: "{{user.group|default(user.name)}}"
+    system: "{{user.system|default(omit)}}"
 
 - name: User | Create User
   user:
diff --git a/roles/bastion-ssh-config/tasks/main.yml b/roles/bastion-ssh-config/tasks/main.yml
index d1aae5ca8..2d240a560 100644
--- a/roles/bastion-ssh-config/tasks/main.yml
+++ b/roles/bastion-ssh-config/tasks/main.yml
@@ -15,4 +15,6 @@
 
 - name: create ssh bastion conf
   become: false
-  template: src=ssh-bastion.conf dest="{{ playbook_dir }}/ssh-bastion.conf"
+  template:
+    src: ssh-bastion.conf
+    dest: "{{ playbook_dir }}/ssh-bastion.conf"
diff --git a/roles/bootstrap-os/tasks/bootstrap-centos.yml b/roles/bootstrap-os/tasks/bootstrap-centos.yml
index 9c41ae84c..b8cf126c1 100644
--- a/roles/bootstrap-os/tasks/bootstrap-centos.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-centos.yml
@@ -1,7 +1,8 @@
 ---
 - name: Check presence of fastestmirror.conf
-  stat: path=/etc/yum/pluginconf.d/fastestmirror.conf
+  stat:
+    path: /etc/yum/pluginconf.d/fastestmirror.conf
   register: fastestmirror
 
 # fastestmirror plugin actually slows down Ansible deployments
diff --git a/roles/bootstrap-os/tasks/bootstrap-coreos.yml b/roles/bootstrap-os/tasks/bootstrap-coreos.yml
index 9ef440e59..b806d9f6d 100644
--- a/roles/bootstrap-os/tasks/bootstrap-coreos.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-coreos.yml
@@ -23,7 +23,9 @@
   tags: facts
 
 - name: Bootstrap | Copy get-pip.py
-  copy: src=get-pip.py dest=~/get-pip.py
+  copy:
+    src: get-pip.py
+    dest: ~/get-pip.py
   when: (need_pip | failed)
 
 - name: Bootstrap | Install pip
@@ -31,11 +33,16 @@
   when: (need_pip | failed)
 
 - name: Bootstrap | Remove get-pip.py
-  file: path=~/get-pip.py state=absent
+  file:
+    path: ~/get-pip.py
+    state: absent
   when: (need_pip | failed)
 
 - name: Bootstrap | Install pip launcher
-  copy: src=runner dest=/opt/bin/pip mode=0755
+  copy:
+    src: runner
+    dest: /opt/bin/pip
+    mode: 0755
   when: (need_pip | failed)
 
 - name: Install required python modules
diff --git a/roles/bootstrap-os/tasks/setup-pipelining.yml b/roles/bootstrap-os/tasks/setup-pipelining.yml
index ca216cc3b..498316630 100644
--- a/roles/bootstrap-os/tasks/setup-pipelining.yml
+++ b/roles/bootstrap-os/tasks/setup-pipelining.yml
@@ -2,5 +2,8 @@
 
 # Remove requiretty to make ssh pipelining work
 - name: Remove require tty
-  lineinfile: regexp="^\w+\s+requiretty" dest=/etc/sudoers state=absent
+  lineinfile:
+    regexp: '^\w+\s+requiretty'
+    dest: /etc/sudoers
+    state: absent
 
diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml
index d7e65fe96..f8654a262 100644
--- a/roles/dnsmasq/tasks/main.yml
+++ b/roles/dnsmasq/tasks/main.yml
@@ -34,7 +34,8 @@
   register: dnsmasq_config
 
 - name: Stat dnsmasq configuration
-  stat: path=/etc/dnsmasq.d/01-kube-dns.conf
+  stat:
+    path: /etc/dnsmasq.d/01-kube-dns.conf
   register: sym
 
 - name: Move previous configuration
@@ -49,7 +50,9 @@
     state: link
 
 - name: Create dnsmasq manifests
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
+  template:
+    src: "{{item.file}}"
+    dest: "{{kube_config_dir}}/{{item.file}}"
   with_items:
     - {file: dnsmasq-ds.yml, type: ds}
     - {file: dnsmasq-svc.yml, type: svc}
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index e92bf3a51..90d7aacb8 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -23,7 +23,9 @@
     state: restarted
 
 - name: Docker | pause while Docker restarts
-  pause: seconds=10 prompt="Waiting for docker restart"
+  pause:
+    seconds: 10
+    prompt: "Waiting for docker restart"
 
 - name: Docker | wait for docker
   command: "{{ docker_bin_dir }}/docker images"
diff --git a/roles/docker/tasks/set_facts_dns.yml b/roles/docker/tasks/set_facts_dns.yml
index 2f629802f..f17c1bde2 100644
--- a/roles/docker/tasks/set_facts_dns.yml
+++ b/roles/docker/tasks/set_facts_dns.yml
@@ -51,13 +51,16 @@
   when: system_search_domains.stdout != ""
 
 - name: check number of nameservers
-  fail: msg="Too many nameservers"
+  fail:
+    msg: "Too many nameservers"
   when: docker_dns_servers|length > 3
 
 - name: check number of search domains
-  fail: msg="Too many search domains"
+  fail:
+    msg: "Too many search domains"
   when: docker_dns_search_domains|length > 6
 
 - name: check length of search domains
-  fail: msg="Search domains exceeded limit of 256 characters"
+  fail:
+    msg: "Search domains exceeded limit of 256 characters"
   when: docker_dns_search_domains|join(' ')|length > 256
diff --git a/roles/docker/tasks/systemd.yml b/roles/docker/tasks/systemd.yml
index a107ab462..18710ac49 100644
--- a/roles/docker/tasks/systemd.yml
+++ b/roles/docker/tasks/systemd.yml
@@ -1,6 +1,8 @@
 ---
 - name: Create docker service systemd directory if it doesn't exist
-  file: path=/etc/systemd/system/docker.service.d state=directory
+  file:
+    path: /etc/systemd/system/docker.service.d
+    state: directory
 
 - name: Write docker proxy drop-in
   template:
diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml
index 7b49f4f0e..b4eb79053 100644
--- a/roles/download/tasks/main.yml
+++ b/roles/download/tasks/main.yml
@@ -5,7 +5,10 @@
   when: "{{ download.enabled|bool and not download.container|bool }}"
 
 - name: Create dest directories
-  file: path={{local_release_dir}}/{{download.dest|dirname}} state=directory recurse=yes
+  file:
+    path: "{{local_release_dir}}/{{download.dest|dirname}}"
+    state: directory
+    recurse: yes
   when: "{{ download.enabled|bool and not download.container|bool }}"
   tags: bootstrap-os
 
@@ -44,7 +47,12 @@
   tags: facts
 
 - name: Create dest directory for saved/loaded container images
-  file: path="{{local_release_dir}}/containers" state=directory recurse=yes mode=0755 owner={{ansible_ssh_user|default(ansible_user_id)}}
+  file:
+    path: "{{local_release_dir}}/containers"
+    state: directory
+    recurse: yes
+    mode: 0755
+    owner: "{{ansible_ssh_user|default(ansible_user_id)}}"
   when: "{{ download.enabled|bool and download.container|bool }}"
   tags: bootstrap-os
 
@@ -58,7 +66,10 @@
   tags: localhost
 
 - name: Download | create local directory for saved/loaded container images
-  file: path="{{local_release_dir}}/containers" state=directory recurse=yes
+  file:
+    path: "{{local_release_dir}}/containers"
+    state: directory
+    recurse: yes
   delegate_to: localhost
   become: false
   run_once: true
@@ -105,7 +116,8 @@
   tags: facts
 
 - name: Stat saved container image
-  stat: path="{{fname}}"
+  stat:
+    path: "{{fname}}"
   register: img
   changed_when: false
   when: "{{ download.enabled|bool and download.container|bool and download_run_once|bool }}"
diff --git a/roles/download/tasks/set_docker_image_facts.yml b/roles/download/tasks/set_docker_image_facts.yml
index 6fe4af440..3f3e20fe1 100644
--- a/roles/download/tasks/set_docker_image_facts.yml
+++ b/roles/download/tasks/set_docker_image_facts.yml
@@ -15,7 +15,8 @@
   check_mode: no
   when: not download_always_pull|bool
 
-- set_fact: docker_images="{{docker_images_raw.stdout|regex_replace('\[|\]|\\n]','')|regex_replace('\s',',')}}"
+- set_fact:
+    docker_images: "{{docker_images_raw.stdout|regex_replace('\\[|\\]|\\\\n]','')|regex_replace('\\s',',')}}"
   when: not download_always_pull|bool
 
 - set_fact:
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
index 8c790e9cd..56dd9f431 100644
--- a/roles/etcd/handlers/main.yml
+++ b/roles/etcd/handlers/main.yml
@@ -16,7 +16,9 @@
   when: is_etcd_master
 
 - name: wait for etcd up
-  uri: url="https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health" validate_certs=no
+  uri:
+    url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
+    validate_certs: no
   register: result
   until: result.status is defined and result.status == 200
   retries: 10
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index 9ed08ddf9..55b54e1ac 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -1,11 +1,11 @@
 ---
 - name: Gen_certs | create etcd cert dir
   file:
-    path={{ etcd_cert_dir }}
-    group={{ etcd_cert_group }}
-    state=directory
-    owner=root
-    recurse=yes
+    path: "{{ etcd_cert_dir }}"
+    group: "{{ etcd_cert_group }}"
+    state: directory
+    owner: root
+    recurse: yes
 
 - name: "Gen_certs | create etcd script dir (on {{groups['etcd'][0]}})"
   file:
@@ -17,11 +17,11 @@
 
 - name: "Gen_certs | create etcd cert dir (on {{groups['etcd'][0]}})"
   file:
-    path={{ etcd_cert_dir }}
-    group={{ etcd_cert_group }}
-    state=directory
-    owner=root
-    recurse=yes
+    path: "{{ etcd_cert_dir }}"
+    group: "{{ etcd_cert_group }}"
+    state: directory
+    owner: root
+    recurse: yes
   run_once: yes
   delegate_to: "{{groups['etcd'][0]}}"
 
@@ -123,11 +123,11 @@
 
 - name: Gen_certs | check certificate permissions
   file:
-    path={{ etcd_cert_dir }}
-    group={{ etcd_cert_group }}
-    state=directory
-    owner=kube
-    recurse=yes
+    path: "{{ etcd_cert_dir }}"
+    group: "{{ etcd_cert_group }}"
+    state: directory
+    owner: kube
+    recurse: yes
 
 - name: Gen_certs | set permissions on keys
   shell: chmod 0600 {{ etcd_cert_dir}}/*key.pem
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 6e952cd33..1af265736 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -5,6 +5,7 @@
 - include: check_certs.yml
   when: cert_management == "script"
   tags: [etcd-secrets, facts]
+
 - include: gen_certs_script.yml
   when: cert_management == "script"
   tags: etcd-secrets
@@ -12,9 +13,11 @@
 - include: sync_etcd_master_certs.yml
   when: cert_management == "vault" and inventory_hostname in groups.etcd
   tags: etcd-secrets
+
 - include: sync_etcd_node_certs.yml
   when: cert_management == "vault" and inventory_hostname in etcd_node_cert_hosts
   tags: etcd-secrets
+
 - include: gen_certs_vault.yml
   when: cert_management == "vault" and (etcd_master_certs_needed|d() or etcd_node_certs_needed|d())
   tags: etcd-secrets
@@ -22,10 +25,13 @@
 - include: "install_{{ etcd_deployment_type }}.yml"
   when: is_etcd_master
   tags: upgrade
+
 - include: set_cluster_health.yml
   when: is_etcd_master
+
 - include: configure.yml
   when: is_etcd_master
+
 - include: refresh_config.yml
   when: is_etcd_master
 
@@ -50,5 +56,6 @@
 # state insted of `new`.
 - include: set_cluster_health.yml
   when: is_etcd_master
+
 - include: refresh_config.yml
   when: is_etcd_master
diff --git a/roles/kernel-upgrade/tasks/centos-7.yml b/roles/kernel-upgrade/tasks/centos-7.yml
index b3181c213..a9de6b56f 100644
--- a/roles/kernel-upgrade/tasks/centos-7.yml
+++ b/roles/kernel-upgrade/tasks/centos-7.yml
@@ -1,7 +1,9 @@
 ---
 - name: install ELRepo key
-  rpm_key: state=present key='{{ elrepo_key_url }}'
+  rpm_key:
+    state: present
+    key: '{{ elrepo_key_url }}'
 
 - name: install elrepo repository
   yum:
@@ -9,7 +11,10 @@
     state: present
 
 - name: upgrade kernel
-  yum: name={{elrepo_kernel_package}} state=present enablerepo=elrepo-kernel
+  yum:
+    name: "{{elrepo_kernel_package}}"
+    state: present
+    enablerepo: elrepo-kernel
   register: upgrade
 
 - name: change default grub entry
diff --git a/roles/kernel-upgrade/tasks/reboot.yml b/roles/kernel-upgrade/tasks/reboot.yml
index 51c383386..5e01dd8fc 100644
--- a/roles/kernel-upgrade/tasks/reboot.yml
+++ b/roles/kernel-upgrade/tasks/reboot.yml
@@ -8,23 +8,33 @@
   shell: nohup bash -c "sleep 5 && shutdown -r now 'Reboot required for updated kernel'" &
 
 - name: Wait for some seconds
-  pause: seconds=10
+  pause:
+    seconds: 10
 
 - set_fact:
     is_bastion: "{{ inventory_hostname == 'bastion' }}"
     wait_for_delegate: "localhost"
+
 - set_fact:
     wait_for_delegate: "{{hostvars['bastion']['ansible_ssh_host']}}"
   when: "{{ 'bastion' in groups['all'] }}"
 
 - name: wait for bastion to come back
-  wait_for: host={{ ansible_ssh_host }} port=22 delay=10 timeout=300
+  wait_for:
+    host: "{{ ansible_ssh_host }}"
+    port: 22
+    delay: 10
+    timeout: 300
   become: false
   delegate_to: localhost
   when: "is_bastion"
 
 - name: waiting for server to come back (using bastion if necessary)
-  wait_for: host={{ ansible_ssh_host }} port=22 delay=10 timeout=300
+  wait_for:
+    host: "{{ ansible_ssh_host }}"
+    port: 22
+    delay: 10
+    timeout: 300
   become: false
   delegate_to: "{{ wait_for_delegate }}"
   when: "not is_bastion"
diff --git a/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml b/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml
index 447fb719f..c6a6bd94d 100644
--- a/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml
+++ b/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml
@@ -5,7 +5,9 @@
   tags: facts
 
 - name: Write calico-policy-controller yaml
-  template: src=calico-policy-controller.yml.j2 dest={{kube_config_dir}}/calico-policy-controller.yml
+  template:
+    src: calico-policy-controller.yml.j2
+    dest: "{{kube_config_dir}}/calico-policy-controller.yml"
   when: inventory_hostname == groups['kube-master'][0]
 
 - name: Start of Calico policy controller
diff --git a/roles/kubernetes-apps/ansible/tasks/main.yaml b/roles/kubernetes-apps/ansible/tasks/main.yml
similarity index 90%
rename from roles/kubernetes-apps/ansible/tasks/main.yaml
rename to roles/kubernetes-apps/ansible/tasks/main.yml
index 787fa156a..04554e785 100644
--- a/roles/kubernetes-apps/ansible/tasks/main.yaml
+++ b/roles/kubernetes-apps/ansible/tasks/main.yml
@@ -1,6 +1,7 @@
 ---
 - name: Kubernetes Apps | Wait for kube-apiserver
-  uri: url=http://localhost:8080/healthz
+  uri:
+    url: http://localhost:8080/healthz
   register: result
   until: result.status == 200
   retries: 10
@@ -8,7 +9,9 @@
   when: inventory_hostname == groups['kube-master'][0]
 
 - name: Kubernetes Apps | Lay Down KubeDNS Template
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
+  template:
+    src: "{{item.file}}"
+    dest: "{{kube_config_dir}}/{{item.file}}"
   with_items:
     - {file: kubedns-rc.yml, type: rc}
     - {file: kubedns-svc.yml, type: svc}
diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
index 0413e4bb6..6319d1c1c 100644
--- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml
+++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
@@ -1,5 +1,7 @@
 - name: Kubernetes Apps | Lay Down Netchecker Template
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
+  template:
+    src: "{{item.file}}"
+    dest: "{{kube_config_dir}}/{{item.file}}"
   with_items:
     - {file: netchecker-agent-ds.yml, type: ds, name: netchecker-agent}
     - {file: netchecker-agent-hostnet-ds.yml, type: ds, name: netchecker-agent-hostnet}
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index 9fd691ddd..edf2509d5 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -1,2 +1,3 @@
 ---
-- debug: msg="No helm charts"
+- debug:
+    msg: "No helm charts"
diff --git a/roles/kubernetes-apps/kpm/tasks/main.yaml b/roles/kubernetes-apps/kpm/tasks/main.yml
similarity index 100%
rename from roles/kubernetes-apps/kpm/tasks/main.yaml
rename to roles/kubernetes-apps/kpm/tasks/main.yml
diff --git a/roles/kubernetes-apps/meta/main.yaml b/roles/kubernetes-apps/meta/main.yml
similarity index 100%
rename from roles/kubernetes-apps/meta/main.yaml
rename to roles/kubernetes-apps/meta/main.yml
diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yaml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
similarity index 100%
rename from roles/kubernetes-apps/network_plugin/canal/tasks/main.yaml
rename to roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml
index 385482276..38edeeb1f 100644
--- a/roles/kubernetes/master/handlers/main.yml
+++ b/roles/kubernetes/master/handlers/main.yml
@@ -22,21 +22,24 @@
     state: restarted
 
 - name: Master | wait for kube-scheduler
-  uri: url=http://localhost:10251/healthz
+  uri:
+    url: http://localhost:10251/healthz
   register: scheduler_result
   until: scheduler_result.status == 200
   retries: 15
   delay: 5
 
 - name: Master | wait for kube-controller-manager
-  uri: url=http://localhost:10252/healthz
+  uri:
+    url: http://localhost:10252/healthz
   register: controller_manager_result
   until: controller_manager_result.status == 200
   retries: 15
   delay: 5
 
 - name: Master | wait for the apiserver to be running
-  uri: url=http://localhost:8080/healthz
+  uri:
+    url: http://localhost:8080/healthz
   register: result
   until: result.status == 200
   retries: 10
diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml
index a622594a1..67a64d4a6 100644
--- a/roles/kubernetes/master/tasks/main.yml
+++ b/roles/kubernetes/master/tasks/main.yml
@@ -36,7 +36,9 @@
 - meta: flush_handlers
 
 - name: copy kube system namespace manifest
-  copy: src=namespace.yml dest={{kube_config_dir}}/{{system_namespace}}-ns.yml
+  copy:
+    src: namespace.yml
+    dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
   run_once: yes
   when: inventory_hostname == groups['kube-master'][0]
   tags: apps
diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml
index 8c6bf7bb1..1bb0c0344 100644
--- a/roles/kubernetes/master/tasks/pre-upgrade.yml
+++ b/roles/kubernetes/master/tasks/pre-upgrade.yml
@@ -43,7 +43,8 @@
   when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and
         kube_apiserver_manifest.stat.exists
 - name: "Pre-upgrade | Pause while waiting for kubelet to delete kube-apiserver pod"
-  pause: seconds=20
+  pause:
+    seconds: 20
   when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and
         kube_apiserver_manifest.stat.exists
   tags: kube-apiserver
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 2c18937c9..5b7453132 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -12,12 +12,18 @@
   tags: nginx
 
 - name: Write kubelet config file
-  template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet.env backup=yes
+  template:
+    src: kubelet.j2
+    dest: "{{ kube_config_dir }}/kubelet.env"
+    backup: yes
   notify: restart kubelet
   tags: kubelet
 
 - name: write the kubecfg (auth) file for kubelet
-  template: src=node-kubeconfig.yaml.j2 dest={{ kube_config_dir }}/node-kubeconfig.yaml backup=yes
+  template:
+    src: node-kubeconfig.yaml.j2
+    dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
+    backup: yes
   notify: restart kubelet
   tags: kubelet
 
diff --git a/roles/kubernetes/node/tasks/nginx-proxy.yml b/roles/kubernetes/node/tasks/nginx-proxy.yml
index 885b84f8f..36cb32592 100644
--- a/roles/kubernetes/node/tasks/nginx-proxy.yml
+++ b/roles/kubernetes/node/tasks/nginx-proxy.yml
@@ -1,9 +1,20 @@
 ---
 - name: nginx-proxy | Write static pod
-  template: src=manifests/nginx-proxy.manifest.j2 dest={{kube_manifest_dir}}/nginx-proxy.yml
+  template:
+    src: manifests/nginx-proxy.manifest.j2
+    dest: "{{kube_manifest_dir}}/nginx-proxy.yml"
 
 - name: nginx-proxy | Make nginx directory
-  file: path=/etc/nginx state=directory mode=0700 owner=root
+  file:
+    path: /etc/nginx
+    state: directory
+    mode: 0700
+    owner: root
 
 - name: nginx-proxy | Write nginx-proxy configuration
-  template: src=nginx.conf.j2 dest="/etc/nginx/nginx.conf" owner=root mode=0755 backup=yes
+  template:
+    src: nginx.conf.j2
+    dest: "/etc/nginx/nginx.conf"
+    owner: root
+    mode: 0755
+    backup: yes
diff --git a/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml b/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml
index f233f4c1d..10e5bba68 100644
--- a/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml
+++ b/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml
@@ -14,7 +14,9 @@
   notify: Preinstall | restart network
 
 - name: Remove kargo specific dhclient hook
-  file: path="{{ dhclienthookfile }}" state=absent
+  file:
+    path: "{{ dhclienthookfile }}"
+    state: absent
   when: dhclienthookfile is defined
   notify: Preinstall | restart network
diff --git a/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml b/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml
index afd5ff229..2df6962e8 100644
--- a/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml
+++ b/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml
@@ -3,7 +3,9 @@
 
 # Running growpart seems to be only required on Azure, as other Cloud Providers do this at boot time
 - name: install growpart
-  package: name=cloud-utils-growpart state=latest
+  package:
+    name: cloud-utils-growpart
+    state: latest
 
 - name: check if growpart needs to be run
   command: growpart -N /dev/sda 1
diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml
index f8b4987d6..402361d66 100644
--- a/roles/kubernetes/preinstall/tasks/main.yml
+++ b/roles/kubernetes/preinstall/tasks/main.yml
@@ -88,12 +88,18 @@
   tags: [network, calico, weave, canal, bootstrap-os]
 
 - name: Update package management cache (YUM)
-  yum: update_cache=yes name='*'
+  yum:
+    update_cache: yes
+    name: '*'
   when: ansible_pkg_mgr == 'yum'
   tags: bootstrap-os
 
 - name: Install latest version of python-apt for Debian distribs
-  apt: name=python-apt state=latest update_cache=yes cache_valid_time=3600
+  apt:
+    name: python-apt
+    state: latest
+    update_cache: yes
+    cache_valid_time: 3600
   when: ansible_os_family == "Debian"
   tags: bootstrap-os
 
@@ -126,7 +132,9 @@
 
 # Todo : selinux configuration
 - name: Set selinux policy to permissive
-  selinux: policy=targeted state=permissive
+  selinux:
+    policy: targeted
+    state: permissive
   when: ansible_os_family == "RedHat"
   changed_when: False
   tags: bootstrap-os
@@ -146,7 +154,8 @@
   tags: bootstrap-os
 
 - name: Stat sysctl file configuration
-  stat: path={{sysctl_file_path}}
+  stat:
+    path: "{{sysctl_file_path}}"
   register: sysctl_file_stat
   tags: bootstrap-os
 
@@ -198,7 +207,8 @@
   tags: [bootstrap-os, resolvconf]
 
 - name: Check if we are running inside a Azure VM
-  stat: path=/var/lib/waagent/
+  stat:
+    path: /var/lib/waagent/
   register: azure_check
   tags: bootstrap-os
diff --git a/roles/kubernetes/preinstall/tasks/set_facts.yml b/roles/kubernetes/preinstall/tasks/set_facts.yml
index 456467a97..214aeccef 100644
--- a/roles/kubernetes/preinstall/tasks/set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/set_facts.yml
@@ -1,12 +1,23 @@
 ---
-- set_fact: kube_apiserver_count="{{ groups['kube-master'] | length }}"
-- set_fact: kube_apiserver_address="{{ ip | default(ansible_default_ipv4['address']) }}"
-- set_fact: kube_apiserver_access_address="{{ access_ip | default(kube_apiserver_address) }}"
-- set_fact: is_kube_master="{{ inventory_hostname in groups['kube-master'] }}"
-- set_fact: first_kube_master="{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
+- set_fact:
+    kube_apiserver_count: "{{ groups['kube-master'] | length }}"
+
+- set_fact:
+    kube_apiserver_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
+
+- set_fact:
+    kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
+
+- set_fact:
+    is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
+
+- set_fact:
+    first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
+
 - set_fact:
     loadbalancer_apiserver_localhost: false
   when: loadbalancer_apiserver is defined
+
 - set_fact:
     kube_apiserver_endpoint: |-
       {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
@@ -21,34 +32,54 @@
       {%- endif -%}
       {%- endif %}
 
-- set_fact: etcd_address="{{ ip | default(ansible_default_ipv4['address']) }}"
-- set_fact: etcd_access_address="{{ access_ip | default(etcd_address) }}"
-- set_fact: etcd_peer_url="https://{{ etcd_access_address }}:2380"
-- set_fact: etcd_client_url="https://{{ etcd_access_address }}:2379"
-- set_fact: etcd_authority="127.0.0.1:2379"
-- set_fact: etcd_endpoint="https://{{ etcd_authority }}"
+- set_fact:
+    etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
+
+- set_fact:
+    etcd_access_address: "{{ access_ip | default(etcd_address) }}"
+
+- set_fact:
+    etcd_peer_url: "https://{{ etcd_access_address }}:2380"
+
+- set_fact:
+    etcd_client_url: "https://{{ etcd_access_address }}:2379"
+
+- set_fact:
+    etcd_authority: "127.0.0.1:2379"
+
+- set_fact:
+    etcd_endpoint: "https://{{ etcd_authority }}"
+
 - set_fact:
     etcd_access_addresses: |-
       {% for item in groups['etcd'] -%}
         https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}
       {%- endfor %}
-- set_fact: etcd_access_endpoint="{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
+
+- set_fact:
+    etcd_access_endpoint: "{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
+
 - set_fact:
     etcd_member_name: |-
       {% for host in groups['etcd'] %}
       {% if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %}
       {% endfor %}
+
 - set_fact:
     etcd_peer_addresses: |-
       {% for item in groups['etcd'] -%}
         {{ "etcd"+loop.index|string }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
       {%- endfor %}
+
 - set_fact:
     is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
+
 - set_fact:
     etcd_after_v3: etcd_version | version_compare("v3.0.0", ">=")
+
 - set_fact:
     etcd_container_bin_dir: "{% if etcd_after_v3 %}/usr/local/bin/{% else %}/{% endif %}"
+
 - set_fact:
     peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"
diff --git a/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml b/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml
index c2413e89f..ffea74b40 100644
--- a/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml
@@ -39,11 +39,13 @@
   when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
 - name: target temporary resolvconf cloud init file (Container Linux by CoreOS)
-  set_fact: resolvconffile=/tmp/resolveconf_cloud_init_conf
+  set_fact:
+    resolvconffile: /tmp/resolveconf_cloud_init_conf
   when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
 - name: check if /etc/dhclient.conf exists
-  stat: path=/etc/dhclient.conf
+  stat:
+    path: /etc/dhclient.conf
   register: dhclient_stat
 
 - name: target dhclient conf file for /etc/dhclient.conf
@@ -52,7 +54,8 @@
   when: dhclient_stat.stat.exists
 
 - name: check if /etc/dhcp/dhclient.conf exists
-  stat: path=/etc/dhcp/dhclient.conf
+  stat:
+    path: /etc/dhcp/dhclient.conf
   register: dhcp_dhclient_stat
 
 - name: target dhclient conf file for /etc/dhcp/dhclient.conf
diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml
index fd6e485df..1257af8c9 100644
--- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml
+++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml
@@ -142,10 +142,10 @@
 
 - name: Gen_certs | check certificate permissions
   file:
-    path={{ kube_cert_dir }}
-    group={{ kube_cert_group }}
-    owner=kube
-    recurse=yes
+    path: "{{ kube_cert_dir }}"
+    group: "{{ kube_cert_group }}"
+    owner: kube
+    recurse: yes
 
 - name: Gen_certs | set permissions on keys
   shell: chmod 0600 {{ kube_cert_dir}}/*key.pem
diff --git a/roles/kubernetes/secrets/tasks/main.yml b/roles/kubernetes/secrets/tasks/main.yml
index f442b62b3..ab2cb76b2 100644
--- a/roles/kubernetes/secrets/tasks/main.yml
+++ b/roles/kubernetes/secrets/tasks/main.yml
@@ -1,29 +1,30 @@
 ---
 - include: check-certs.yml
   tags: [k8s-secrets, facts]
+
 - include: check-tokens.yml
   tags: [k8s-secrets, facts]
 
 - name: Make sure the certificate directory exits
   file:
-    path={{ kube_cert_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_cert_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
 
 - name: Make sure the tokens directory exits
   file:
-    path={{ kube_token_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_token_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
 
 - name: Make sure the users directory exits
   file:
-    path={{ kube_users_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_users_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
 
 - name: Populate users for basic auth in API
   lineinfile:
@@ -62,10 +63,10 @@
 
 - name: "Get_tokens | Make sure the tokens directory exits (on {{groups['kube-master'][0]}})"
   file:
-    path={{ kube_token_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_token_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
   run_once: yes
   delegate_to: "{{groups['kube-master'][0]}}"
   when: gen_tokens|default(false)
@@ -77,9 +78,11 @@
 - include: sync_kube_master_certs.yml
   when: cert_management == "vault" and inventory_hostname in groups['kube-master']
   tags: k8s-secrets
+
 - include: sync_kube_node_certs.yml
   when: cert_management == "vault" and inventory_hostname in groups['k8s-cluster']
   tags: k8s-secrets
+
 - include: gen_certs_vault.yml
   when: cert_management == "vault"
   tags: k8s-secrets
diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml
index efe4616d2..5197aa005 100644
--- a/roles/network_plugin/calico/rr/tasks/main.yml
+++ b/roles/network_plugin/calico/rr/tasks/main.yml
@@ -35,11 +35,15 @@
     group: root
 
 - name: Calico-rr | Write calico-rr.env for systemd init file
-  template: src=calico-rr.env.j2 dest=/etc/calico/calico-rr.env
+  template:
+    src: calico-rr.env.j2
+    dest: /etc/calico/calico-rr.env
   notify: restart calico-rr
 
 - name: Calico-rr | Write calico-rr systemd init file
-  template: src=calico-rr.service.j2 dest=/etc/systemd/system/calico-rr.service
+  template:
+    src: calico-rr.service.j2
+    dest: /etc/systemd/system/calico-rr.service
   notify: restart calico-rr
 
 - name: Calico-rr | Configure route reflector
diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml
index 6d738bd37..eefed471f 100644
--- a/roles/network_plugin/calico/tasks/main.yml
+++ b/roles/network_plugin/calico/tasks/main.yml
@@ -60,7 +60,9 @@
   tags: [hyperkube, upgrade]
 
 - name: Calico | wait for etcd
-  uri: url=https://localhost:2379/health validate_certs=no
+  uri:
+    url: https://localhost:2379/health
+    validate_certs: no
   register: result
   until: result.status == 200 or result.status == 401
   retries: 10
@@ -160,17 +162,23 @@
   when: legacy_calicoctl
 
 - name: Calico (old) | Write calico-node systemd init file
-  template: src=calico-node.service.legacy.j2 dest=/etc/systemd/system/calico-node.service
+  template:
+    src: calico-node.service.legacy.j2
+    dest: /etc/systemd/system/calico-node.service
   when: legacy_calicoctl
   notify: restart calico-node
 
 - name: Calico | Write calico.env for systemd init file
-  template: src=calico.env.j2 dest=/etc/calico/calico.env
+  template:
+    src: calico.env.j2
+    dest: /etc/calico/calico.env
   when: not legacy_calicoctl
   notify: restart calico-node
 
 - name: Calico | Write calico-node systemd init file
-  template: src=calico-node.service.j2 dest=/etc/systemd/system/calico-node.service
+  template:
+    src: calico-node.service.j2
+    dest: /etc/systemd/system/calico-node.service
   when: not legacy_calicoctl
   notify: restart calico-node
 
diff --git a/roles/network_plugin/flannel/handlers/main.yml b/roles/network_plugin/flannel/handlers/main.yml
index 82810ac98..98c93a53a 100644
--- a/roles/network_plugin/flannel/handlers/main.yml
+++ b/roles/network_plugin/flannel/handlers/main.yml
@@ -28,7 +28,9 @@
     state: restarted
 
 - name: Flannel | pause while Docker restarts
-  pause: seconds=10 prompt="Waiting for docker restart"
+  pause:
+    seconds: 10
+    prompt: "Waiting for docker restart"
 
 - name: Flannel | wait for docker
   command: "{{ docker_bin_dir }}/docker images"
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 52cb19370..5b17a094b 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -1,7 +1,9 @@
 ---
 - name: reset | stop services
-  service: name={{ item }} state=stopped
+  service:
+    name: "{{ item }}"
+    state: stopped
   with_items:
     - kubelet
     - etcd
@@ -33,7 +35,9 @@
   shell: "{{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"
 
 - name: reset | restart docker if needed
-  service: name=docker state=restarted
+  service:
+    name: docker
+    state: restarted
   when: docker_dropins_removed.changed
 
 - name: reset | gather mounted kubelet dirs
@@ -46,7 +50,9 @@
   with_items: '{{ mounted_dirs.stdout_lines }}'
 
 - name: reset | delete some files and directories
-  file: path={{ item }} state=absent
+  file:
+    path: "{{ item }}"
+    state: absent
   with_items:
     - "{{kube_config_dir}}"
     - /var/lib/kubelet
diff --git a/roles/uploads/tasks/main.yml b/roles/uploads/tasks/main.yml
index 2d6000599..a770020c2 100644
--- a/roles/uploads/tasks/main.yml
+++ b/roles/uploads/tasks/main.yml
@@ -1,6 +1,9 @@
 ---
 - name: Create dest directories
-  file: path={{local_release_dir}}/{{item.dest|dirname}} state=directory recurse=yes
+  file:
+    path: "{{local_release_dir}}/{{item.dest|dirname}}"
+    state: directory
+    recurse: yes
   with_items: '{{downloads}}'
 
 - name: Download items
diff --git a/roles/vault/tasks/bootstrap/main.yml b/roles/vault/tasks/bootstrap/main.yml
index edd2912d3..98904bbe7 100644
--- a/roles/vault/tasks/bootstrap/main.yml
+++ b/roles/vault/tasks/bootstrap/main.yml
@@ -2,8 +2,10 @@
 
 - include: ../shared/check_vault.yml
   when: inventory_hostname in groups.vault
+
 - include: sync_secrets.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/find_leader.yml
   when: inventory_hostname in groups.vault and vault_cluster_is_initialized|d()
 
@@ -54,5 +56,6 @@
 
 - include: role_auth_cert.yml
   when: vault_role_auth_method == "cert"
+
 - include: role_auth_userpass.yml
   when: vault_role_auth_method == "userpass"
diff --git a/roles/vault/tasks/bootstrap/role_auth_cert.yml b/roles/vault/tasks/bootstrap/role_auth_cert.yml
index 7bbf58e86..d92cd9d69 100644
--- a/roles/vault/tasks/bootstrap/role_auth_cert.yml
+++ b/roles/vault/tasks/bootstrap/role_auth_cert.yml
@@ -21,5 +21,6 @@
     ca_name: auth-ca
     mount_name: auth-pki
   when: inventory_hostname == groups.vault|first and not vault_auth_ca_cert_needed
+
 - include: create_etcd_role.yml
   when: inventory_hostname in groups.etcd
diff --git a/roles/vault/tasks/bootstrap/role_auth_userpass.yml b/roles/vault/tasks/bootstrap/role_auth_userpass.yml
index ad09ab05b..2ad2fbc91 100644
--- a/roles/vault/tasks/bootstrap/role_auth_userpass.yml
+++ b/roles/vault/tasks/bootstrap/role_auth_userpass.yml
@@ -6,5 +6,6 @@
     auth_backend_path: userpass
     auth_backend_type: userpass
   when: inventory_hostname == groups.vault|first
+
 - include: create_etcd_role.yml
   when: inventory_hostname in groups.etcd
diff --git a/roles/vault/tasks/cluster/main.yml b/roles/vault/tasks/cluster/main.yml
index 5dab550aa..db97dd078 100644
--- a/roles/vault/tasks/cluster/main.yml
+++ b/roles/vault/tasks/cluster/main.yml
@@ -2,6 +2,7 @@
 
 - include: ../shared/check_vault.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/check_etcd.yml
   when: inventory_hostname in groups.vault
 
@@ -9,18 +10,25 @@
 
 - include: configure.yml
   when: inventory_hostname in groups.vault
+
 - include: binary.yml
   when: inventory_hostname in groups.vault and vault_deployment_type == "host"
+
 - include: systemd.yml
   when: inventory_hostname in groups.vault
+
 - include: init.yml
   when: inventory_hostname in groups.vault
+
 - include: unseal.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/find_leader.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/pki_mount.yml
   when: inventory_hostname == groups.vault|first
+
 - include: ../shared/config_ca.yml
   vars:
     ca_name: ca
@@ -31,5 +39,6 @@
 
 - include: role_auth_cert.yml
   when: vault_role_auth_method == "cert"
+
 - include: role_auth_userpass.yml
   when: vault_role_auth_method == "userpass"