diff --git a/README.md b/README.md
index eb896255d..0864d2ce9 100644
--- a/README.md
+++ b/README.md
@@ -67,12 +67,13 @@ plugins can be deployed for a given single cluster.
 Requirements
 --------------
+* **Ansible v2.2 (or newer) and python-netaddr are installed on the machine
+  that will run Ansible commands**
 * The target servers must have **access to the Internet** in order to pull docker images.
+* The target servers are configured to allow **IPv4 forwarding**.
+* **Your ssh key must be copied** to all the servers that are part of your inventory.
 * The **firewalls are not managed**, you'll need to implement your own rules the way you used to. in order to avoid any issue during deployment you should disable your firewall.
-* The target servers are configured to allow **IPv4 forwarding**.
-* **Copy your ssh keys** to all the servers part of your inventory.
-* **Ansible v2.2 (or newer) and python-netaddr**
 ## Network plugins
diff --git a/docs/azure.md b/docs/azure.md
index 6b75f2fce..4aeabde71 100644
--- a/docs/azure.md
+++ b/docs/azure.md
@@ -1,7 +1,7 @@
 Azure
 ===============
-To deploy kubespray on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
+To deploy Kubernetes on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
 All your instances are required to run in a resource group and a routing table has to be attached to the subnet your instances are in.
@@ -49,8 +49,8 @@ This is the AppId from the last command
 - Create the role assignment with: `azure role assignment create --spn http://kubernetes -o "Owner" -c /subscriptions/SUBSCRIPTION_ID`
-azure\_aad\_client\_id musst be set to the AppId, azure\_aad\_client\_secret is your choosen secret.
+azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
 ## Provisioning Azure with Resource Group Templates
-You'll find Resource Group Templates and scripts to provision the required infrastructore to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
\ No newline at end of file
+You'll find Resource Group Templates and scripts to provision the required infrastructure to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
diff --git a/inventory/group_vars/all.yml b/inventory/group_vars/all.yml
index 2e882af92..606f226be 100644
--- a/inventory/group_vars/all.yml
+++ b/inventory/group_vars/all.yml
@@ -24,7 +24,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
 kube_users_dir: "{{ kube_config_dir }}/users"
 ## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.5.1
+kube_version: v1.5.3
 # Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G) diff --git a/reset.yml b/reset.yml index 9cf504756..42a188ccc 100644 --- a/reset.yml +++ b/reset.yml @@ -9,7 +9,8 @@ pre_tasks: - name: check confirmation - fail: msg="Reset confirmation failed" + fail: + msg: "Reset confirmation failed" when: reset_confirmation != "yes" roles: diff --git a/roles/adduser/tasks/main.yml b/roles/adduser/tasks/main.yml index 394ff9294..43ec8ebbb 100644 --- a/roles/adduser/tasks/main.yml +++ b/roles/adduser/tasks/main.yml @@ -1,6 +1,8 @@ --- - name: User | Create User Group - group: name={{user.group|default(user.name)}} system={{user.system|default(omit)}} + group: + name: "{{user.group|default(user.name)}}" + system: "{{user.system|default(omit)}}" - name: User | Create User user: diff --git a/roles/bastion-ssh-config/tasks/main.yml b/roles/bastion-ssh-config/tasks/main.yml index d1aae5ca8..2d240a560 100644 --- a/roles/bastion-ssh-config/tasks/main.yml +++ b/roles/bastion-ssh-config/tasks/main.yml @@ -15,4 +15,6 @@ - name: create ssh bastion conf become: false - template: src=ssh-bastion.conf dest="{{ playbook_dir }}/ssh-bastion.conf" + template: + src: ssh-bastion.conf + dest: "{{ playbook_dir }}/ssh-bastion.conf" diff --git a/roles/bootstrap-os/tasks/bootstrap-centos.yml b/roles/bootstrap-os/tasks/bootstrap-centos.yml index 9c41ae84c..b8cf126c1 100644 --- a/roles/bootstrap-os/tasks/bootstrap-centos.yml +++ b/roles/bootstrap-os/tasks/bootstrap-centos.yml @@ -1,7 +1,8 @@ --- - name: Check presence of fastestmirror.conf - stat: path=/etc/yum/pluginconf.d/fastestmirror.conf + stat: + path: /etc/yum/pluginconf.d/fastestmirror.conf register: fastestmirror # fastestmirror plugin actually slows down Ansible deployments diff --git a/roles/bootstrap-os/tasks/bootstrap-coreos.yml b/roles/bootstrap-os/tasks/bootstrap-coreos.yml index 9ef440e59..b806d9f6d 100644 --- a/roles/bootstrap-os/tasks/bootstrap-coreos.yml +++ b/roles/bootstrap-os/tasks/bootstrap-coreos.yml @@ -23,7 +23,9 @@ tags: facts - name: Bootstrap | Copy get-pip.py - copy: src=get-pip.py dest=~/get-pip.py + copy: + src: get-pip.py + dest: ~/get-pip.py when: (need_pip | failed) - name: Bootstrap | Install pip @@ -31,11 +33,16 @@ when: (need_pip | failed) - name: Bootstrap | Remove get-pip.py - file: path=~/get-pip.py state=absent + file: + path: ~/get-pip.py + state: absent when: (need_pip | failed) - name: Bootstrap | Install pip launcher - copy: src=runner dest=/opt/bin/pip mode=0755 + copy: + src: runner + dest: /opt/bin/pip + mode: 0755 when: (need_pip | failed) - name: Install required python modules diff --git a/roles/bootstrap-os/tasks/setup-pipelining.yml b/roles/bootstrap-os/tasks/setup-pipelining.yml index ca216cc3b..7143f260e 100644 --- a/roles/bootstrap-os/tasks/setup-pipelining.yml +++ b/roles/bootstrap-os/tasks/setup-pipelining.yml @@ -2,5 +2,8 @@ # Remove requiretty to make ssh pipelining work - name: Remove require tty - lineinfile: regexp="^\w+\s+requiretty" dest=/etc/sudoers state=absent + lineinfile: + regexp: '^\w+\s+requiretty' + dest: /etc/sudoers + state: absent diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml index d7e65fe96..f8654a262 100644 --- a/roles/dnsmasq/tasks/main.yml +++ b/roles/dnsmasq/tasks/main.yml @@ -34,7 +34,8 @@ register: dnsmasq_config - name: Stat dnsmasq configuration - stat: path=/etc/dnsmasq.d/01-kube-dns.conf + stat: + path: /etc/dnsmasq.d/01-kube-dns.conf register: sym - name: Move previous configuration @@ -49,7 +50,9 @@ state: link - name: Create dnsmasq manifests - template: 
src={{item.file}} dest={{kube_config_dir}}/{{item.file}} + template: + src: "{{item.file}}" + dest: "{{kube_config_dir}}/{{item.file}}" with_items: - {file: dnsmasq-ds.yml, type: ds} - {file: dnsmasq-svc.yml, type: svc} diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml index e92bf3a51..90d7aacb8 100644 --- a/roles/docker/handlers/main.yml +++ b/roles/docker/handlers/main.yml @@ -23,7 +23,9 @@ state: restarted - name: Docker | pause while Docker restarts - pause: seconds=10 prompt="Waiting for docker restart" + pause: + seconds: 10 + prompt: "Waiting for docker restart" - name: Docker | wait for docker command: "{{ docker_bin_dir }}/docker images" diff --git a/roles/docker/tasks/set_facts_dns.yml b/roles/docker/tasks/set_facts_dns.yml index 2f629802f..f17c1bde2 100644 --- a/roles/docker/tasks/set_facts_dns.yml +++ b/roles/docker/tasks/set_facts_dns.yml @@ -51,13 +51,16 @@ when: system_search_domains.stdout != "" - name: check number of nameservers - fail: msg="Too many nameservers" + fail: + msg: "Too many nameservers" when: docker_dns_servers|length > 3 - name: check number of search domains - fail: msg="Too many search domains" + fail: + msg: "Too many search domains" when: docker_dns_search_domains|length > 6 - name: check length of search domains - fail: msg="Search domains exceeded limit of 256 characters" + fail: + msg: "Search domains exceeded limit of 256 characters" when: docker_dns_search_domains|join(' ')|length > 256 diff --git a/roles/docker/tasks/systemd.yml b/roles/docker/tasks/systemd.yml index a107ab462..18710ac49 100644 --- a/roles/docker/tasks/systemd.yml +++ b/roles/docker/tasks/systemd.yml @@ -1,6 +1,8 @@ --- - name: Create docker service systemd directory if it doesn't exist - file: path=/etc/systemd/system/docker.service.d state=directory + file: + path: /etc/systemd/system/docker.service.d + state: directory - name: Write docker proxy drop-in template: diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 7b49f4f0e..b4eb79053 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -5,7 +5,10 @@ when: "{{ download.enabled|bool and not download.container|bool }}" - name: Create dest directories - file: path={{local_release_dir}}/{{download.dest|dirname}} state=directory recurse=yes + file: + path: "{{local_release_dir}}/{{download.dest|dirname}}" + state: directory + recurse: yes when: "{{ download.enabled|bool and not download.container|bool }}" tags: bootstrap-os @@ -44,7 +47,12 @@ tags: facts - name: Create dest directory for saved/loaded container images - file: path="{{local_release_dir}}/containers" state=directory recurse=yes mode=0755 owner={{ansible_ssh_user|default(ansible_user_id)}} + file: + path: "{{local_release_dir}}/containers" + state: directory + recurse: yes + mode: 0755 + owner: "{{ansible_ssh_user|default(ansible_user_id)}}" when: "{{ download.enabled|bool and download.container|bool }}" tags: bootstrap-os @@ -58,7 +66,10 @@ tags: localhost - name: Download | create local directory for saved/loaded container images - file: path="{{local_release_dir}}/containers" state=directory recurse=yes + file: + path: "{{local_release_dir}}/containers" + state: directory + recurse: yes delegate_to: localhost become: false run_once: true @@ -105,7 +116,8 @@ tags: facts - name: Stat saved container image - stat: path="{{fname}}" + stat: + path: "{{fname}}" register: img changed_when: false when: "{{ download.enabled|bool and download.container|bool and download_run_once|bool }}" diff 
--git a/roles/download/tasks/set_docker_image_facts.yml b/roles/download/tasks/set_docker_image_facts.yml index 33d6d471e..0efda4d09 100644 --- a/roles/download/tasks/set_docker_image_facts.yml +++ b/roles/download/tasks/set_docker_image_facts.yml @@ -16,7 +16,8 @@ check_mode: no when: not download_always_pull|bool -- set_fact: docker_images="{{docker_images_raw.stdout|regex_replace('\[|\]|\\n]','')|regex_replace('\s',',')}}" +- set_fact: + docker_images: "{{docker_images_raw.stdout|regex_replace('\\[|\\]|\\n]','')|regex_replace('\\s',',')}}" no_log: true when: not download_always_pull|bool diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml index 8c790e9cd..56dd9f431 100644 --- a/roles/etcd/handlers/main.yml +++ b/roles/etcd/handlers/main.yml @@ -16,7 +16,9 @@ when: is_etcd_master - name: wait for etcd up - uri: url="https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health" validate_certs=no + uri: + url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health" + validate_certs: no register: result until: result.status is defined and result.status == 200 retries: 10 diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml index f95ec97ca..bb6d55660 100644 --- a/roles/etcd/tasks/gen_certs_script.yml +++ b/roles/etcd/tasks/gen_certs_script.yml @@ -1,11 +1,11 @@ --- - name: Gen_certs | create etcd cert dir file: - path={{ etcd_cert_dir }} - group={{ etcd_cert_group }} - state=directory - owner=root - recurse=yes + path: "{{ etcd_cert_dir }}" + group: "{{ etcd_cert_group }}" + state: directory + owner: root + recurse: yes - name: "Gen_certs | create etcd script dir (on {{groups['etcd'][0]}})" file: @@ -17,11 +17,11 @@ - name: "Gen_certs | create etcd cert dir (on {{groups['etcd'][0]}})" file: - path={{ etcd_cert_dir }} - group={{ etcd_cert_group }} - state=directory - owner=root - recurse=yes + path: "{{ etcd_cert_dir }}" + group: "{{ etcd_cert_group }}" + state: directory + owner: root + recurse: yes run_once: yes delegate_to: "{{groups['etcd'][0]}}" @@ -126,11 +126,11 @@ - name: Gen_certs | check certificate permissions file: - path={{ etcd_cert_dir }} - group={{ etcd_cert_group }} - state=directory - owner=kube - recurse=yes + path: "{{ etcd_cert_dir }}" + group: "{{ etcd_cert_group }}" + state: directory + owner: kube + recurse: yes - name: Gen_certs | set permissions on keys shell: chmod 0600 {{ etcd_cert_dir}}/*key.pem diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 6e952cd33..1af265736 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -5,6 +5,7 @@ - include: check_certs.yml when: cert_management == "script" tags: [etcd-secrets, facts] + - include: gen_certs_script.yml when: cert_management == "script" tags: etcd-secrets @@ -12,9 +13,11 @@ - include: sync_etcd_master_certs.yml when: cert_management == "vault" and inventory_hostname in groups.etcd tags: etcd-secrets + - include: sync_etcd_node_certs.yml when: cert_management == "vault" and inventory_hostname in etcd_node_cert_hosts tags: etcd-secrets + - include: gen_certs_vault.yml when: cert_management == "vault" and (etcd_master_certs_needed|d() or etcd_node_certs_needed|d()) tags: etcd-secrets @@ -22,10 +25,13 @@ - include: "install_{{ etcd_deployment_type }}.yml" when: is_etcd_master tags: upgrade + - include: set_cluster_health.yml when: is_etcd_master + - include: configure.yml when: is_etcd_master + - include: refresh_config.yml when: is_etcd_master @@ 
-50,5 +56,6 @@ # state insted of `new`. - include: set_cluster_health.yml when: is_etcd_master + - include: refresh_config.yml when: is_etcd_master diff --git a/roles/etcd/tasks/pre_upgrade.yml b/roles/etcd/tasks/pre_upgrade.yml index eb17e9871..d498a0336 100644 --- a/roles/etcd/tasks/pre_upgrade.yml +++ b/roles/etcd/tasks/pre_upgrade.yml @@ -34,6 +34,11 @@ command: "{{ docker_bin_dir }}/docker rm -f {{item}}" with_items: "{{etcd_proxy_container.stdout_lines}}" +- name: "Pre-upgrade | see if etcdctl is installed" + stat: + path: "{{ bin_dir }}/etcdctl" + register: etcdctl_installed + - name: "Pre-upgrade | check if member list is non-SSL" command: "{{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses | regex_replace('https','http') }} member list" register: etcd_member_list @@ -41,6 +46,7 @@ delay: 3 until: etcd_member_list.rc != 2 run_once: true + when: etcdctl_installed.stat.exists failed_when: false - name: "Pre-upgrade | change peer names to SSL" @@ -48,4 +54,4 @@ {{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses | regex_replace('https','http') }} member list | awk -F"[: =]" '{print "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses | regex_replace('https','http') }} member update "$1" https:"$7":"$8}' | bash run_once: true - when: 'etcd_member_list.rc == 0 and "http://" in etcd_member_list.stdout' + when: 'etcdctl_installed.stat.exists and etcd_member_list.rc == 0 and "http://" in etcd_member_list.stdout' diff --git a/roles/kernel-upgrade/tasks/centos-7.yml b/roles/kernel-upgrade/tasks/centos-7.yml index b3181c213..a9de6b56f 100644 --- a/roles/kernel-upgrade/tasks/centos-7.yml +++ b/roles/kernel-upgrade/tasks/centos-7.yml @@ -1,7 +1,9 @@ --- - name: install ELRepo key - rpm_key: state=present key='{{ elrepo_key_url }}' + rpm_key: + state: present + key: '{{ elrepo_key_url }}' - name: install elrepo repository yum: @@ -9,7 +11,10 @@ state: present - name: upgrade kernel - yum: name={{elrepo_kernel_package}} state=present enablerepo=elrepo-kernel + yum: + name: "{{elrepo_kernel_package}}" + state: present + enablerepo: elrepo-kernel register: upgrade - name: change default grub entry diff --git a/roles/kernel-upgrade/tasks/reboot.yml b/roles/kernel-upgrade/tasks/reboot.yml index 51c383386..5e01dd8fc 100644 --- a/roles/kernel-upgrade/tasks/reboot.yml +++ b/roles/kernel-upgrade/tasks/reboot.yml @@ -8,23 +8,33 @@ shell: nohup bash -c "sleep 5 && shutdown -r now 'Reboot required for updated kernel'" & - name: Wait for some seconds - pause: seconds=10 + pause: + seconds: 10 - set_fact: is_bastion: "{{ inventory_hostname == 'bastion' }}" wait_for_delegate: "localhost" + - set_fact: wait_for_delegate: "{{hostvars['bastion']['ansible_ssh_host']}}" when: "{{ 'bastion' in groups['all'] }}" - name: wait for bastion to come back - wait_for: host={{ ansible_ssh_host }} port=22 delay=10 timeout=300 + wait_for: + host: "{{ ansible_ssh_host }}" + port: 22 + delay: 10 + timeout: 300 become: false delegate_to: localhost when: "is_bastion" - name: waiting for server to come back (using bastion if necessary) - wait_for: host={{ ansible_ssh_host }} port=22 delay=10 timeout=300 + wait_for: + host: "{{ ansible_ssh_host }}" + port: 22 + delay: 10 + timeout: 300 become: false delegate_to: "{{ wait_for_delegate }}" when: "not is_bastion" diff --git a/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml b/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml index 447fb719f..c6a6bd94d 100644 --- 
a/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml +++ b/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml @@ -5,7 +5,9 @@ tags: facts - name: Write calico-policy-controller yaml - template: src=calico-policy-controller.yml.j2 dest={{kube_config_dir}}/calico-policy-controller.yml + template: + src: calico-policy-controller.yml.j2 + dest: "{{kube_config_dir}}/calico-policy-controller.yml" when: inventory_hostname == groups['kube-master'][0] - name: Start of Calico policy controller diff --git a/roles/kubernetes-apps/ansible/tasks/main.yaml b/roles/kubernetes-apps/ansible/tasks/main.yml similarity index 90% rename from roles/kubernetes-apps/ansible/tasks/main.yaml rename to roles/kubernetes-apps/ansible/tasks/main.yml index 787fa156a..04554e785 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yaml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -1,6 +1,7 @@ --- - name: Kubernetes Apps | Wait for kube-apiserver - uri: url=http://localhost:8080/healthz + uri: + url: http://localhost:8080/healthz register: result until: result.status == 200 retries: 10 @@ -8,7 +9,9 @@ when: inventory_hostname == groups['kube-master'][0] - name: Kubernetes Apps | Lay Down KubeDNS Template - template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}} + template: + src: "{{item.file}}" + dest: "{{kube_config_dir}}/{{item.file}}" with_items: - {file: kubedns-rc.yml, type: rc} - {file: kubedns-svc.yml, type: svc} diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml index 0413e4bb6..6319d1c1c 100644 --- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml +++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml @@ -1,5 +1,7 @@ - name: Kubernetes Apps | Lay Down Netchecker Template - template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}} + template: + src: "{{item.file}}" + dest: "{{kube_config_dir}}/{{item.file}}" with_items: - {file: netchecker-agent-ds.yml, type: ds, name: netchecker-agent} - {file: netchecker-agent-hostnet-ds.yml, type: ds, name: netchecker-agent-hostnet} diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index 9fd691ddd..edf2509d5 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -1,2 +1,3 @@ --- -- debug: msg="No helm charts" +- debug: + msg: "No helm charts" diff --git a/roles/kubernetes-apps/kpm/tasks/main.yaml b/roles/kubernetes-apps/kpm/tasks/main.yml similarity index 100% rename from roles/kubernetes-apps/kpm/tasks/main.yaml rename to roles/kubernetes-apps/kpm/tasks/main.yml diff --git a/roles/kubernetes-apps/meta/main.yaml b/roles/kubernetes-apps/meta/main.yml similarity index 100% rename from roles/kubernetes-apps/meta/main.yaml rename to roles/kubernetes-apps/meta/main.yml diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yaml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml similarity index 100% rename from roles/kubernetes-apps/network_plugin/canal/tasks/main.yaml rename to roles/kubernetes-apps/network_plugin/canal/tasks/main.yml diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml index 385482276..38edeeb1f 100644 --- a/roles/kubernetes/master/handlers/main.yml +++ b/roles/kubernetes/master/handlers/main.yml @@ -22,21 +22,24 @@ state: restarted - name: Master | wait for kube-scheduler - uri: url=http://localhost:10251/healthz + uri: + url: http://localhost:10251/healthz register: 
scheduler_result until: scheduler_result.status == 200 retries: 15 delay: 5 - name: Master | wait for kube-controller-manager - uri: url=http://localhost:10252/healthz + uri: + url: http://localhost:10252/healthz register: controller_manager_result until: controller_manager_result.status == 200 retries: 15 delay: 5 - name: Master | wait for the apiserver to be running - uri: url=http://localhost:8080/healthz + uri: + url: http://localhost:8080/healthz register: result until: result.status == 200 retries: 10 diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml index a622594a1..67a64d4a6 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -36,7 +36,9 @@ - meta: flush_handlers - name: copy kube system namespace manifest - copy: src=namespace.yml dest={{kube_config_dir}}/{{system_namespace}}-ns.yml + copy: + src: namespace.yml + dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml" run_once: yes when: inventory_hostname == groups['kube-master'][0] tags: apps diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml index 8c6bf7bb1..1bb0c0344 100644 --- a/roles/kubernetes/master/tasks/pre-upgrade.yml +++ b/roles/kubernetes/master/tasks/pre-upgrade.yml @@ -43,7 +43,8 @@ when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists - name: "Pre-upgrade | Pause while waiting for kubelet to delete kube-apiserver pod" - pause: seconds=20 + pause: + seconds: 20 when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists tags: kube-apiserver diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index 2c18937c9..5b7453132 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -12,12 +12,18 @@ tags: nginx - name: Write kubelet config file - template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet.env backup=yes + template: + src: kubelet.j2 + dest: "{{ kube_config_dir }}/kubelet.env" + backup: yes notify: restart kubelet tags: kubelet - name: write the kubecfg (auth) file for kubelet - template: src=node-kubeconfig.yaml.j2 dest={{ kube_config_dir }}/node-kubeconfig.yaml backup=yes + template: + src: node-kubeconfig.yaml.j2 + dest: "{{ kube_config_dir }}/node-kubeconfig.yaml" + backup: yes notify: restart kubelet tags: kubelet diff --git a/roles/kubernetes/node/tasks/nginx-proxy.yml b/roles/kubernetes/node/tasks/nginx-proxy.yml index 885b84f8f..36cb32592 100644 --- a/roles/kubernetes/node/tasks/nginx-proxy.yml +++ b/roles/kubernetes/node/tasks/nginx-proxy.yml @@ -1,9 +1,20 @@ --- - name: nginx-proxy | Write static pod - template: src=manifests/nginx-proxy.manifest.j2 dest={{kube_manifest_dir}}/nginx-proxy.yml + template: + src: manifests/nginx-proxy.manifest.j2 + dest: "{{kube_manifest_dir}}/nginx-proxy.yml" - name: nginx-proxy | Make nginx directory - file: path=/etc/nginx state=directory mode=0700 owner=root + file: + path: /etc/nginx + state: directory + mode: 0700 + owner: root - name: nginx-proxy | Write nginx-proxy configuration - template: src=nginx.conf.j2 dest="/etc/nginx/nginx.conf" owner=root mode=0755 backup=yes + template: + src: nginx.conf.j2 + dest: "/etc/nginx/nginx.conf" + owner: root + mode: 0755 + backup: yes diff --git a/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml b/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml index f233f4c1d..10e5bba68 
100644 --- a/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml +++ b/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml @@ -14,7 +14,9 @@ notify: Preinstall | restart network - name: Remove kargo specific dhclient hook - file: path="{{ dhclienthookfile }}" state=absent + file: + path: "{{ dhclienthookfile }}" + state: absent when: dhclienthookfile is defined notify: Preinstall | restart network diff --git a/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml b/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml index afd5ff229..2df6962e8 100644 --- a/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml +++ b/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml @@ -3,7 +3,9 @@ # Running growpart seems to be only required on Azure, as other Cloud Providers do this at boot time - name: install growpart - package: name=cloud-utils-growpart state=latest + package: + name: cloud-utils-growpart + state: latest - name: check if growpart needs to be run command: growpart -N /dev/sda 1 diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index f8b4987d6..5b79c101d 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -88,12 +88,18 @@ tags: [network, calico, weave, canal, bootstrap-os] - name: Update package management cache (YUM) - yum: update_cache=yes name='*' + yum: + update_cache: yes + name: '*' when: ansible_pkg_mgr == 'yum' tags: bootstrap-os - name: Install latest version of python-apt for Debian distribs - apt: name=python-apt state=latest update_cache=yes cache_valid_time=3600 + apt: + name: python-apt + state: latest + update_cache: yes + cache_valid_time: 3600 when: ansible_os_family == "Debian" tags: bootstrap-os @@ -125,9 +131,17 @@ tags: bootstrap-os # Todo : selinux configuration -- name: Set selinux policy to permissive - selinux: policy=targeted state=permissive +- name: Confirm selinux deployed + stat: + path: /etc/selinux/config when: ansible_os_family == "RedHat" + register: slc + +- name: Set selinux policy to permissive + selinux: + policy: targeted + state: permissive + when: ansible_os_family == "RedHat" and slc.stat.exists == True changed_when: False tags: bootstrap-os @@ -146,7 +160,8 @@ tags: bootstrap-os - name: Stat sysctl file configuration - stat: path={{sysctl_file_path}} + stat: + path: "{{sysctl_file_path}}" register: sysctl_file_stat tags: bootstrap-os @@ -198,7 +213,8 @@ tags: [bootstrap-os, resolvconf] - name: Check if we are running inside a Azure VM - stat: path=/var/lib/waagent/ + stat: + path: /var/lib/waagent/ register: azure_check tags: bootstrap-os diff --git a/roles/kubernetes/preinstall/tasks/set_facts.yml b/roles/kubernetes/preinstall/tasks/set_facts.yml index 456467a97..214aeccef 100644 --- a/roles/kubernetes/preinstall/tasks/set_facts.yml +++ b/roles/kubernetes/preinstall/tasks/set_facts.yml @@ -1,12 +1,23 @@ --- -- set_fact: kube_apiserver_count="{{ groups['kube-master'] | length }}" -- set_fact: kube_apiserver_address="{{ ip | default(ansible_default_ipv4['address']) }}" -- set_fact: kube_apiserver_access_address="{{ access_ip | default(kube_apiserver_address) }}" -- set_fact: is_kube_master="{{ inventory_hostname in groups['kube-master'] }}" -- set_fact: first_kube_master="{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}" +- set_fact: + kube_apiserver_count: 
"{{ groups['kube-master'] | length }}" + +- set_fact: + kube_apiserver_address: "{{ ip | default(ansible_default_ipv4['address']) }}" + +- set_fact: + kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}" + +- set_fact: + is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}" + +- set_fact: + first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}" + - set_fact: loadbalancer_apiserver_localhost: false when: loadbalancer_apiserver is defined + - set_fact: kube_apiserver_endpoint: |- {% if not is_kube_master and loadbalancer_apiserver_localhost -%} @@ -21,34 +32,54 @@ {%- endif -%} {%- endif %} -- set_fact: etcd_address="{{ ip | default(ansible_default_ipv4['address']) }}" -- set_fact: etcd_access_address="{{ access_ip | default(etcd_address) }}" -- set_fact: etcd_peer_url="https://{{ etcd_access_address }}:2380" -- set_fact: etcd_client_url="https://{{ etcd_access_address }}:2379" -- set_fact: etcd_authority="127.0.0.1:2379" -- set_fact: etcd_endpoint="https://{{ etcd_authority }}" +- set_fact: + etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}" + +- set_fact: + etcd_access_address: "{{ access_ip | default(etcd_address) }}" + +- set_fact: + etcd_peer_url: "https://{{ etcd_access_address }}:2380" + +- set_fact: + etcd_client_url: "https://{{ etcd_access_address }}:2379" + +- set_fact: + etcd_authority: "127.0.0.1:2379" + +- set_fact: + etcd_endpoint: "https://{{ etcd_authority }}" + - set_fact: etcd_access_addresses: |- {% for item in groups['etcd'] -%} https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %} {%- endfor %} -- set_fact: etcd_access_endpoint="{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}" + +- set_fact: + etcd_access_endpoint: "{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}" + - set_fact: etcd_member_name: |- {% for host in groups['etcd'] %} {% if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %} {% endfor %} + - set_fact: etcd_peer_addresses: |- {% for item in groups['etcd'] -%} {{ "etcd"+loop.index|string }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %} {%- endfor %} + - set_fact: is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}" + - set_fact: etcd_after_v3: etcd_version | version_compare("v3.0.0", ">=") + - set_fact: etcd_container_bin_dir: "{% if etcd_after_v3 %}/usr/local/bin/{% else %}/{% endif %}" + - set_fact: peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}" diff --git a/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml b/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml index c2413e89f..ffea74b40 100644 --- a/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml +++ b/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml @@ -39,11 +39,13 @@ when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] - name: target temporary resolvconf cloud init file (Container Linux by CoreOS) - set_fact: resolvconffile=/tmp/resolveconf_cloud_init_conf + set_fact: + resolvconffile: /tmp/resolveconf_cloud_init_conf when: ansible_os_family in ["CoreOS", "Container 
Linux by CoreOS"] - name: check if /etc/dhclient.conf exists - stat: path=/etc/dhclient.conf + stat: + path: /etc/dhclient.conf register: dhclient_stat - name: target dhclient conf file for /etc/dhclient.conf @@ -52,7 +54,8 @@ when: dhclient_stat.stat.exists - name: check if /etc/dhcp/dhclient.conf exists - stat: path=/etc/dhcp/dhclient.conf + stat: + path: /etc/dhcp/dhclient.conf register: dhcp_dhclient_stat - name: target dhclient conf file for /etc/dhcp/dhclient.conf diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index cd742d647..f75a45d1a 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -146,10 +146,10 @@ - name: Gen_certs | check certificate permissions file: - path={{ kube_cert_dir }} - group={{ kube_cert_group }} - owner=kube - recurse=yes + path: "{{ kube_cert_dir }}" + group: "{{ kube_cert_group }}" + owner: kube + recurse: yes - name: Gen_certs | set permissions on keys shell: chmod 0600 {{ kube_cert_dir}}/*key.pem diff --git a/roles/kubernetes/secrets/tasks/main.yml b/roles/kubernetes/secrets/tasks/main.yml index f442b62b3..ab2cb76b2 100644 --- a/roles/kubernetes/secrets/tasks/main.yml +++ b/roles/kubernetes/secrets/tasks/main.yml @@ -1,29 +1,30 @@ --- - include: check-certs.yml tags: [k8s-secrets, facts] + - include: check-tokens.yml tags: [k8s-secrets, facts] - name: Make sure the certificate directory exits file: - path={{ kube_cert_dir }} - state=directory - mode=o-rwx - group={{ kube_cert_group }} + path: "{{ kube_cert_dir }}" + state: directory + mode: o-rwx + group: "{{ kube_cert_group }}" - name: Make sure the tokens directory exits file: - path={{ kube_token_dir }} - state=directory - mode=o-rwx - group={{ kube_cert_group }} + path: "{{ kube_token_dir }}" + state: directory + mode: o-rwx + group: "{{ kube_cert_group }}" - name: Make sure the users directory exits file: - path={{ kube_users_dir }} - state=directory - mode=o-rwx - group={{ kube_cert_group }} + path: "{{ kube_users_dir }}" + state: directory + mode: o-rwx + group: "{{ kube_cert_group }}" - name: Populate users for basic auth in API lineinfile: @@ -62,10 +63,10 @@ - name: "Get_tokens | Make sure the tokens directory exits (on {{groups['kube-master'][0]}})" file: - path={{ kube_token_dir }} - state=directory - mode=o-rwx - group={{ kube_cert_group }} + path: "{{ kube_token_dir }}" + state: directory + mode: o-rwx + group: "{{ kube_cert_group }}" run_once: yes delegate_to: "{{groups['kube-master'][0]}}" when: gen_tokens|default(false) @@ -77,9 +78,11 @@ - include: sync_kube_master_certs.yml when: cert_management == "vault" and inventory_hostname in groups['kube-master'] tags: k8s-secrets + - include: sync_kube_node_certs.yml when: cert_management == "vault" and inventory_hostname in groups['k8s-cluster'] tags: k8s-secrets + - include: gen_certs_vault.yml when: cert_management == "vault" tags: k8s-secrets diff --git a/roles/kubernetes/secrets/templates/openssl.conf.j2 b/roles/kubernetes/secrets/templates/openssl.conf.j2 index ac94b6800..d3164286e 100644 --- a/roles/kubernetes/secrets/templates/openssl.conf.j2 +++ b/roles/kubernetes/secrets/templates/openssl.conf.j2 @@ -16,7 +16,7 @@ DNS.5 = localhost DNS.{{ 5 + loop.index }} = {{ host }} {% endfor %} {% if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %} -{% set idx = groups['kube-master'] | length | int + 5 %} +{% set idx = groups['kube-master'] | length | int + 5 + 1 %} 
DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }} {% endif %} {% for host in groups['kube-master'] %} diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml index efe4616d2..5197aa005 100644 --- a/roles/network_plugin/calico/rr/tasks/main.yml +++ b/roles/network_plugin/calico/rr/tasks/main.yml @@ -35,11 +35,15 @@ group: root - name: Calico-rr | Write calico-rr.env for systemd init file - template: src=calico-rr.env.j2 dest=/etc/calico/calico-rr.env + template: + src: calico-rr.env.j2 + dest: /etc/calico/calico-rr.env notify: restart calico-rr - name: Calico-rr | Write calico-rr systemd init file - template: src=calico-rr.service.j2 dest=/etc/systemd/system/calico-rr.service + template: + src: calico-rr.service.j2 + dest: /etc/systemd/system/calico-rr.service notify: restart calico-rr - name: Calico-rr | Configure route reflector diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml index 6d738bd37..eefed471f 100644 --- a/roles/network_plugin/calico/tasks/main.yml +++ b/roles/network_plugin/calico/tasks/main.yml @@ -60,7 +60,9 @@ tags: [hyperkube, upgrade] - name: Calico | wait for etcd - uri: url=https://localhost:2379/health validate_certs=no + uri: + url: https://localhost:2379/health + validate_certs: no register: result until: result.status == 200 or result.status == 401 retries: 10 @@ -160,17 +162,23 @@ when: legacy_calicoctl - name: Calico (old) | Write calico-node systemd init file - template: src=calico-node.service.legacy.j2 dest=/etc/systemd/system/calico-node.service + template: + src: calico-node.service.legacy.j2 + dest: /etc/systemd/system/calico-node.service when: legacy_calicoctl notify: restart calico-node - name: Calico | Write calico.env for systemd init file - template: src=calico.env.j2 dest=/etc/calico/calico.env + template: + src: calico.env.j2 + dest: /etc/calico/calico.env when: not legacy_calicoctl notify: restart calico-node - name: Calico | Write calico-node systemd init file - template: src=calico-node.service.j2 dest=/etc/systemd/system/calico-node.service + template: + src: calico-node.service.j2 + dest: /etc/systemd/system/calico-node.service when: not legacy_calicoctl notify: restart calico-node diff --git a/roles/network_plugin/flannel/handlers/main.yml b/roles/network_plugin/flannel/handlers/main.yml index 82810ac98..98c93a53a 100644 --- a/roles/network_plugin/flannel/handlers/main.yml +++ b/roles/network_plugin/flannel/handlers/main.yml @@ -28,7 +28,9 @@ state: restarted - name: Flannel | pause while Docker restarts - pause: seconds=10 prompt="Waiting for docker restart" + pause: + seconds: 10 + prompt: "Waiting for docker restart" - name: Flannel | wait for docker command: "{{ docker_bin_dir }}/docker images" diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 52cb19370..5b17a094b 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -1,7 +1,9 @@ --- - name: reset | stop services - service: name={{ item }} state=stopped + service: + name: "{{ item }}" + state: stopped with_items: - kubelet - etcd @@ -33,7 +35,9 @@ shell: "{{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv" - name: reset | restart docker if needed - service: name=docker state=restarted + service: + name: docker + state: restarted when: docker_dropins_removed.changed - name: reset | gather mounted kubelet dirs @@ -46,7 +50,9 @@ with_items: '{{ mounted_dirs.stdout_lines }}' - name: reset | delete some files and directories 
- file: path={{ item }} state=absent + file: + path: "{{ item }}" + state: absent with_items: - "{{kube_config_dir}}" - /var/lib/kubelet diff --git a/roles/uploads/defaults/main.yml b/roles/uploads/defaults/main.yml index af9572532..2a23087a4 100644 --- a/roles/uploads/defaults/main.yml +++ b/roles/uploads/defaults/main.yml @@ -4,7 +4,7 @@ local_release_dir: /tmp # Versions etcd_version: v3.0.6 calico_version: v0.23.0 -calico_cni_version: v1.4.2 +calico_cni_version: v1.5.6 weave_version: v1.8.2 # Download URL's @@ -14,8 +14,8 @@ calico_cni_ipam_download_url: "https://github.com/projectcalico/calico-cni/relea weave_download_url: "https://github.com/weaveworks/weave/releases/download/{{weave_version}}/weave" # Checksums -calico_cni_checksum: "9cab29764681e9d80da826e4b2cd10841cc01a749e0018867d96dd76a4691548" -calico_cni_ipam_checksum: "09d076b15b791956efee91646e47fdfdcf382db16082cef4f542a9fff7bae172" +calico_cni_checksum: "9a6bd6da267c498a1833117777c069f44f720d23226d8459bada2a0b41cb8258" +calico_cni_ipam_checksum: "8d3574736df1ce10ea88fdec94d84dc58642081d3774d2d48249c6ee94ed316d" weave_checksum: "ee22e690985115a5986352b2c75589674349c618a5c95893f87600a13e2d58e9" etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485" diff --git a/roles/uploads/tasks/main.yml b/roles/uploads/tasks/main.yml index 2d6000599..a770020c2 100644 --- a/roles/uploads/tasks/main.yml +++ b/roles/uploads/tasks/main.yml @@ -1,6 +1,9 @@ --- - name: Create dest directories - file: path={{local_release_dir}}/{{item.dest|dirname}} state=directory recurse=yes + file: + path: "{{local_release_dir}}/{{item.dest|dirname}}" + state: directory + recurse: yes with_items: '{{downloads}}' - name: Download items diff --git a/roles/vault/tasks/bootstrap/main.yml b/roles/vault/tasks/bootstrap/main.yml index edd2912d3..98904bbe7 100644 --- a/roles/vault/tasks/bootstrap/main.yml +++ b/roles/vault/tasks/bootstrap/main.yml @@ -2,8 +2,10 @@ - include: ../shared/check_vault.yml when: inventory_hostname in groups.vault + - include: sync_secrets.yml when: inventory_hostname in groups.vault + - include: ../shared/find_leader.yml when: inventory_hostname in groups.vault and vault_cluster_is_initialized|d() @@ -54,5 +56,6 @@ - include: role_auth_cert.yml when: vault_role_auth_method == "cert" + - include: role_auth_userpass.yml when: vault_role_auth_method == "userpass" diff --git a/roles/vault/tasks/bootstrap/role_auth_cert.yml b/roles/vault/tasks/bootstrap/role_auth_cert.yml index 7bbf58e86..d92cd9d69 100644 --- a/roles/vault/tasks/bootstrap/role_auth_cert.yml +++ b/roles/vault/tasks/bootstrap/role_auth_cert.yml @@ -21,5 +21,6 @@ ca_name: auth-ca mount_name: auth-pki when: inventory_hostname == groups.vault|first and not vault_auth_ca_cert_needed + - include: create_etcd_role.yml when: inventory_hostname in groups.etcd diff --git a/roles/vault/tasks/bootstrap/role_auth_userpass.yml b/roles/vault/tasks/bootstrap/role_auth_userpass.yml index ad09ab05b..2ad2fbc91 100644 --- a/roles/vault/tasks/bootstrap/role_auth_userpass.yml +++ b/roles/vault/tasks/bootstrap/role_auth_userpass.yml @@ -6,5 +6,6 @@ auth_backend_path: userpass auth_backend_type: userpass when: inventory_hostname == groups.vault|first + - include: create_etcd_role.yml when: inventory_hostname in groups.etcd diff --git a/roles/vault/tasks/cluster/main.yml b/roles/vault/tasks/cluster/main.yml index 5dab550aa..db97dd078 100644 --- a/roles/vault/tasks/cluster/main.yml +++ b/roles/vault/tasks/cluster/main.yml @@ -2,6 +2,7 @@ - include: ../shared/check_vault.yml 
when: inventory_hostname in groups.vault + - include: ../shared/check_etcd.yml when: inventory_hostname in groups.vault @@ -9,18 +10,25 @@ - include: configure.yml when: inventory_hostname in groups.vault + - include: binary.yml when: inventory_hostname in groups.vault and vault_deployment_type == "host" + - include: systemd.yml when: inventory_hostname in groups.vault + - include: init.yml when: inventory_hostname in groups.vault + - include: unseal.yml when: inventory_hostname in groups.vault + - include: ../shared/find_leader.yml when: inventory_hostname in groups.vault + - include: ../shared/pki_mount.yml when: inventory_hostname == groups.vault|first + - include: ../shared/config_ca.yml vars: ca_name: ca @@ -31,5 +39,6 @@ - include: role_auth_cert.yml when: vault_role_auth_method == "cert" + - include: role_auth_userpass.yml when: vault_role_auth_method == "userpass"