diff --git a/.ansible-lint b/.ansible-lint index ececfc573..e1909e966 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -2,15 +2,8 @@ parseable: true skip_list: # see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules - # The following rules throw errors. - # These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose. - - '301' - - '302' - - '303' - - '305' - - '306' - - '404' - - '503' + + # DO NOT add any other rules to this skip_list, instead use local `# noqa` with a comment explaining WHY it is necessary # These rules are intentionally skipped: # diff --git a/contrib/azurerm/roles/generate-inventory/tasks/main.yml b/contrib/azurerm/roles/generate-inventory/tasks/main.yml index 20a06e10c..ccc5e219a 100644 --- a/contrib/azurerm/roles/generate-inventory/tasks/main.yml +++ b/contrib/azurerm/roles/generate-inventory/tasks/main.yml @@ -1,6 +1,6 @@ --- -- name: Query Azure VMs +- name: Query Azure VMs # noqa 301 command: azure vm list-ip-address --json {{ azure_resource_group }} register: vm_list_cmd diff --git a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml index e53912cfc..6ba7d5a87 100644 --- a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml +++ b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml @@ -1,14 +1,14 @@ --- -- name: Query Azure VMs IPs +- name: Query Azure VMs IPs # noqa 301 command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }} register: vm_ip_list_cmd -- name: Query Azure VMs Roles +- name: Query Azure VMs Roles # noqa 301 command: az vm list -o json --resource-group {{ azure_resource_group }} register: vm_list_cmd -- name: Query Azure Load Balancer Public IP +- name: Query Azure Load Balancer Public IP # noqa 301 command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip register: lb_pubip_cmd diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml index 40ca53cd6..5b63a6b37 100644 --- a/contrib/dind/roles/dind-host/tasks/main.yaml +++ b/contrib/dind/roles/dind-host/tasks/main.yaml @@ -69,7 +69,7 @@ # Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian, # handle manually -- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) +- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301 raw: | echo {{ item | hash('sha1') }} > /etc/machine-id.new mv -b /etc/machine-id.new /etc/machine-id diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml index 8f80914f8..2865b1004 100644 --- a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml +++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml @@ -7,7 +7,7 @@ register: glusterfs_ppa_added when: glusterfs_ppa_use -- name: Ensure GlusterFS client will reinstall if the PPA was just added. +- name: Ensure GlusterFS client will reinstall if the PPA was just added. 
# noqa 503 apt: name: "{{ item }}" state: absent diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml index 3b586c539..855fe36bf 100644 --- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml +++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml @@ -7,7 +7,7 @@ register: glusterfs_ppa_added when: glusterfs_ppa_use -- name: Ensure GlusterFS will reinstall if the PPA was just added. +- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503 apt: name: "{{ item }}" state: absent diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml index 0ffd6f469..e6b16e54a 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml @@ -6,7 +6,7 @@ - name: "Delete bootstrap Heketi." command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\"" when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0" -- name: "Ensure there is nothing left over." +- name: "Ensure there is nothing left over." # noqa 301 command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json" register: "heketi_result" until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml index 7d2c5981e..07e86237c 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml @@ -13,7 +13,7 @@ - name: "Copy topology configuration into container." changed_when: false command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json" -- name: "Load heketi topology." +- name: "Load heketi topology." # noqa 503 when: "render.changed" command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json" register: "load_heketi" diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml index 14ab97793..dc93d7828 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml @@ -18,7 +18,7 @@ - name: "Provision database volume." command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage" when: "heketi_database_volume_exists is undefined" -- name: "Copy configuration from pod." +- name: "Copy configuration from pod." # noqa 301 become: true command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json" - name: "Get heketi volume ids." 
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml index dd1e272be..4430a5592 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml @@ -10,10 +10,10 @@ template: src: "topology.json.j2" dest: "{{ kube_config_dir }}/topology.json" -- name: "Copy topology configuration into container." +- name: "Copy topology configuration into container." # noqa 503 when: "rendering.changed" command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json" -- name: "Load heketi topology." +- name: "Load heketi topology." # noqa 503 when: "rendering.changed" command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json" - name: "Get heketi topology." diff --git a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml index 9ace96e62..7ddbf65c8 100644 --- a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml +++ b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml @@ -22,7 +22,7 @@ ignore_errors: true changed_when: false -- name: "Remove volume groups." +- name: "Remove volume groups." # noqa 301 environment: PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management become: true @@ -30,7 +30,7 @@ with_items: "{{ volume_groups.stdout_lines }}" loop_control: { loop_var: "volume_group" } -- name: "Remove physical volume from cluster disks." +- name: "Remove physical volume from cluster disks." # noqa 301 environment: PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management become: true diff --git a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml index ddc56b256..18c11a731 100644 --- a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml +++ b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml @@ -1,43 +1,43 @@ --- -- name: "Remove storage class." +- name: "Remove storage class." # noqa 301 command: "{{ bin_dir }}/kubectl delete storageclass gluster" ignore_errors: true -- name: "Tear down heketi." +- name: "Tear down heketi." # noqa 301 command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\"" ignore_errors: true -- name: "Tear down heketi." +- name: "Tear down heketi." # noqa 301 command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\"" ignore_errors: true - name: "Tear down bootstrap." include_tasks: "../provision/tasks/bootstrap/tear-down.yml" -- name: "Ensure there is nothing left over." +- name: "Ensure there is nothing left over." # noqa 301 command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json" register: "heketi_result" until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" retries: 60 delay: 5 -- name: "Ensure there is nothing left over." +- name: "Ensure there is nothing left over." 
# noqa 301 command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json" register: "heketi_result" until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" retries: 60 delay: 5 -- name: "Tear down glusterfs." +- name: "Tear down glusterfs." # noqa 301 command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs" ignore_errors: true -- name: "Remove heketi storage service." +- name: "Remove heketi storage service." # noqa 301 command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints" ignore_errors: true -- name: "Remove heketi gluster role binding" +- name: "Remove heketi gluster role binding" # noqa 301 command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin" ignore_errors: true -- name: "Remove heketi config secret" +- name: "Remove heketi config secret" # noqa 301 command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret" ignore_errors: true -- name: "Remove heketi db backup" +- name: "Remove heketi db backup" # noqa 301 command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup" ignore_errors: true -- name: "Remove heketi service account" +- name: "Remove heketi service account" # noqa 301 command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account" ignore_errors: true - name: "Get secrets" diff --git a/extra_playbooks/migrate_openstack_provider.yml b/extra_playbooks/migrate_openstack_provider.yml index 114e4cf0c..0e1584470 100644 --- a/extra_playbooks/migrate_openstack_provider.yml +++ b/extra_playbooks/migrate_openstack_provider.yml @@ -16,13 +16,13 @@ src: get_cinder_pvs.sh dest: /tmp mode: u+rwx - - name: Get PVs provisioned by in-tree cloud provider + - name: Get PVs provisioned by in-tree cloud provider # noqa 301 command: /tmp/get_cinder_pvs.sh register: pvs - name: Remove get_cinder_pvs.sh file: path: /tmp/get_cinder_pvs.sh state: absent - - name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation + - name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation # noqa 301 command: "{{ bin_dir }}/kubectl annotate --overwrite pv {{ item }} pv.kubernetes.io/provisioned-by=cinder.csi.openstack.org" loop: "{{ pvs.stdout_lines | list }}" diff --git a/roles/container-engine/containerd/tasks/crictl.yml b/roles/container-engine/containerd/tasks/crictl.yml index eaa94efa3..9310cb945 100644 --- a/roles/container-engine/containerd/tasks/crictl.yml +++ b/roles/container-engine/containerd/tasks/crictl.yml @@ -4,7 +4,7 @@ vars: download: "{{ download_defaults | combine(downloads.crictl) }}" -- name: Install crictl config +- name: Install crictl config # noqa 404 template: src: ../templates/crictl.yaml.j2 dest: /etc/crictl.yaml diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml index ebed7e11f..8859b1691 100644 --- a/roles/container-engine/containerd/tasks/main.yml +++ b/roles/container-engine/containerd/tasks/main.yml @@ -34,7 +34,7 @@ tags: - facts -- name: disable unified_cgroup_hierarchy in Fedora 31+ +- name: disable unified_cgroup_hierarchy in Fedora 31+ # noqa 305 shell: cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0" when: diff --git a/roles/container-engine/cri-o/tasks/crictl.yml b/roles/container-engine/cri-o/tasks/crictl.yml index e96980533..146fef66a 100644 --- a/roles/container-engine/cri-o/tasks/crictl.yml +++ b/roles/container-engine/cri-o/tasks/crictl.yml @@ -4,7 +4,7 @@ vars: download: "{{ download_defaults | 
combine(downloads.crictl) }}" -- name: Install crictl config +- name: Install crictl config # noqa 404 template: src: ../templates/crictl.yaml.j2 dest: /etc/crictl.yaml @@ -21,7 +21,7 @@ group: no delegate_to: "{{ inventory_hostname }}" -- name: Get crictl completion +- name: Get crictl completion # noqa 305 shell: "{{ bin_dir }}/crictl completion" changed_when: False register: cri_completion diff --git a/roles/container-engine/cri-o/tasks/main.yaml b/roles/container-engine/cri-o/tasks/main.yaml index af0ecb92e..6b6f114d2 100644 --- a/roles/container-engine/cri-o/tasks/main.yaml +++ b/roles/container-engine/cri-o/tasks/main.yaml @@ -59,7 +59,7 @@ - ansible_distribution == "CentOS" - ansible_distribution_major_version == "8" -- name: Ensure latest version of libseccom installed +- name: Ensure latest version of libseccom installed # noqa 303 command: "yum update -y libseccomp" when: - ansible_distribution == "CentOS" diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml index 9369186bb..c444f897c 100644 --- a/roles/container-engine/docker/tasks/main.yml +++ b/roles/container-engine/docker/tasks/main.yml @@ -47,7 +47,7 @@ tags: - facts -- name: disable unified_cgroup_hierarchy in Fedora 31+ +- name: disable unified_cgroup_hierarchy in Fedora 31+ # noqa 305 shell: cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0" when: diff --git a/roles/container-engine/docker/tasks/set_facts_dns.yml b/roles/container-engine/docker/tasks/set_facts_dns.yml index 23464dabb..b884c7cf0 100644 --- a/roles/container-engine/docker/tasks/set_facts_dns.yml +++ b/roles/container-engine/docker/tasks/set_facts_dns.yml @@ -28,13 +28,13 @@ set_fact: docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains|default([]) }}" -- name: check system nameservers +- name: check system nameservers # noqa 306 shell: grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/' changed_when: False register: system_nameservers check_mode: no -- name: check system search domains +- name: check system search domains # noqa 306 shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/' changed_when: False register: system_search_domains diff --git a/roles/container-engine/docker/tasks/systemd.yml b/roles/container-engine/docker/tasks/systemd.yml index 0a232ea9e..108eea188 100644 --- a/roles/container-engine/docker/tasks/systemd.yml +++ b/roles/container-engine/docker/tasks/systemd.yml @@ -11,7 +11,7 @@ notify: restart docker when: http_proxy is defined or https_proxy is defined -- name: get systemd version +- name: get systemd version # noqa 306 # noqa 303 - systemctl is called intentionally here shell: systemctl --version | head -n 1 | cut -d " " -f 2 register: systemd_version diff --git a/roles/download/tasks/check_pull_required.yml b/roles/download/tasks/check_pull_required.yml index 9361b87c5..14dc114fa 100644 --- a/roles/download/tasks/check_pull_required.yml +++ b/roles/download/tasks/check_pull_required.yml @@ -4,7 +4,7 @@ # the template, just replace all instances of {{ `{{` }} with {{ and {{ '}}' }} with }}. # It will output something like the following: # nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc... 
-- name: check_pull_required | Generate a list of information about the images on a node +- name: check_pull_required | Generate a list of information about the images on a node # noqa 305 shell: "{{ image_info_command }}" no_log: true register: docker_images diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml index 234bf1f95..28b3867f2 100644 --- a/roles/download/tasks/download_container.yml +++ b/roles/download/tasks/download_container.yml @@ -63,7 +63,7 @@ - pull_required or download_run_once - not image_is_cached - - name: download_container | Save and compress image + - name: download_container | Save and compress image # noqa 305 shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" delegate_to: "{{ download_delegate }}" delegate_facts: no @@ -103,7 +103,7 @@ - pull_required - download_force_cache - - name: download_container | Load image into docker + - name: download_container | Load image into docker # noqa 305 shell: "{{ image_load_command }}" register: container_load_status failed_when: container_load_status is failed diff --git a/roles/download/tasks/prep_download.yml b/roles/download/tasks/prep_download.yml index 8e1d131ca..2ac1253f1 100644 --- a/roles/download/tasks/prep_download.yml +++ b/roles/download/tasks/prep_download.yml @@ -32,7 +32,7 @@ - localhost - asserts -- name: prep_download | On localhost, check if user has access to docker without using sudo +- name: prep_download | On localhost, check if user has access to docker without using sudo # noqa 305 shell: "{{ image_info_command_on_localhost }}" delegate_to: localhost connection: local @@ -68,7 +68,7 @@ - localhost - asserts -- name: prep_download | Register docker images info +- name: prep_download | Register docker images info # noqa 305 shell: "{{ image_info_command }}" no_log: true register: docker_images diff --git a/roles/download/tasks/prep_kubeadm_images.yml b/roles/download/tasks/prep_kubeadm_images.yml index 411ef5b3f..c97c19e0b 100644 --- a/roles/download/tasks/prep_kubeadm_images.yml +++ b/roles/download/tasks/prep_kubeadm_images.yml @@ -30,7 +30,7 @@ mode: "0755" state: file -- name: prep_kubeadm_images | Generate list of required images +- name: prep_kubeadm_images | Generate list of required images # noqa 306 shell: "{{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -v coredns" register: kubeadm_images_raw run_once: true diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml index 56d5f86c8..39df567f6 100644 --- a/roles/etcd/tasks/configure.yml +++ b/roles/etcd/tasks/configure.yml @@ -1,5 +1,5 @@ --- -- name: Configure | Check if etcd cluster is healthy +- name: Configure | Check if etcd cluster is healthy # noqa 306 shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'" register: etcd_cluster_is_healthy failed_when: false @@ -16,7 +16,7 @@ ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" -- name: Configure | Check if etcd-events cluster is healthy +- name: Configure | Check if etcd-events cluster is healthy # noqa 306 shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'" register: etcd_events_cluster_is_healthy failed_when: false @@ -73,7 +73,7 @@ ignore_errors: "{{ etcd_events_cluster_is_healthy.rc == 0 
}}" when: is_etcd_master and etcd_events_cluster_setup -- name: Configure | Wait for etcd cluster to be healthy +- name: Configure | Wait for etcd cluster to be healthy # noqa 306 shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'" register: etcd_cluster_is_healthy until: etcd_cluster_is_healthy.rc == 0 @@ -94,7 +94,7 @@ ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem" ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}" -- name: Configure | Wait for etcd-events cluster to be healthy +- name: Configure | Wait for etcd-events cluster to be healthy # noqa 306 shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'" register: etcd_events_cluster_is_healthy until: etcd_events_cluster_is_healthy.rc == 0 diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml index 651b76719..5dd25547d 100644 --- a/roles/etcd/tasks/gen_certs_script.yml +++ b/roles/etcd/tasks/gen_certs_script.yml @@ -139,7 +139,7 @@ inventory_hostname in groups['k8s-cluster']) and sync_certs|default(false) and inventory_hostname not in groups['etcd'] -- name: Gen_certs | Copy certs on nodes +- name: Gen_certs | Copy certs on nodes # noqa 306 shell: "base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}" args: executable: /bin/bash diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml index a6a197a74..c4de32906 100644 --- a/roles/etcd/tasks/join_etcd-events_member.yml +++ b/roles/etcd/tasks/join_etcd-events_member.yml @@ -1,5 +1,5 @@ --- -- name: Join Member | Add member to etcd-events cluster +- name: Join Member | Add member to etcd-events cluster # noqa 301 305 shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}" register: member_add_result until: member_add_result.rc == 0 @@ -24,7 +24,7 @@ {%- endif -%} {%- endfor -%} -- name: Join Member | Ensure member is in etcd-events cluster +- name: Join Member | Ensure member is in etcd-events cluster # noqa 306 shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_events_access_address }}" register: etcd_events_member_in_cluster changed_when: false diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml index e7ee2a348..24a800bef 100644 --- a/roles/etcd/tasks/join_etcd_member.yml +++ b/roles/etcd/tasks/join_etcd_member.yml @@ -1,5 +1,5 @@ --- -- name: Join Member | Add member to etcd cluster +- name: Join Member | Add member to etcd cluster # noqa 301 305 shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}" register: member_add_result until: member_add_result.rc == 0 @@ -24,7 +24,7 @@ {%- endif -%} {%- endfor -%} -- name: Join Member | Ensure member is in etcd cluster +- name: Join Member | Ensure member is in etcd cluster # noqa 306 shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}" register: etcd_member_in_cluster changed_when: false diff --git a/roles/etcd/tasks/upd_ca_trust.yml b/roles/etcd/tasks/upd_ca_trust.yml index 1f9da04f2..d9b4d5ef8 100644 --- a/roles/etcd/tasks/upd_ca_trust.yml +++ b/roles/etcd/tasks/upd_ca_trust.yml @@ -23,14 +23,14 @@ remote_src: true register: etcd_ca_cert -- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS) +- name: Gen_certs | update ca-certificates 
(Debian/Ubuntu/SUSE/Container Linux by CoreOS) # noqa 503 command: update-ca-certificates when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse"] -- name: Gen_certs | update ca-certificates (RedHat) +- name: Gen_certs | update ca-certificates (RedHat) # noqa 503 command: update-ca-trust extract when: etcd_ca_cert.changed and ansible_os_family == "RedHat" -- name: Gen_certs | update ca-certificates (ClearLinux) +- name: Gen_certs | update ca-certificates (ClearLinux) # noqa 503 command: clrtrust add "{{ ca_cert_path }}" when: etcd_ca_cert.changed and ansible_os_family == "ClearLinux" diff --git a/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml index 860ca0e3a..4a3ebff4d 100644 --- a/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml +++ b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml @@ -32,7 +32,7 @@ register: helmcert_master run_once: true -- name: Gen_helm_tiller_certs | run cert generation script +- name: Gen_helm_tiller_certs | run cert generation script # noqa 301 run_once: yes delegate_to: "{{ groups['kube-master'][0] }}" command: "{{ helm_script_dir }}/helm-make-ssl.sh -e {{ helm_home_dir }} -d {{ helm_tiller_cert_dir }}" @@ -57,7 +57,7 @@ with_items: - "{{ helm_client_certs }}" -- name: Gen_helm_tiller_certs | Gather helm client certs +- name: Gen_helm_tiller_certs | Gather helm client certs # noqa 306 # noqa 303 - tar is called intentionally here, but maybe this should be done with the slurp module shell: "tar cfz - -C {{ helm_home_dir }} {{ helm_client_certs|join(' ') }} | base64 --wrap=0" args: @@ -85,7 +85,7 @@ mode: "0600" when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0] -- name: Gen_helm_tiller_certs | Unpack helm certs on masters +- name: Gen_helm_tiller_certs | Unpack helm certs on masters # noqa 306 shell: "base64 -d < {{ helm_cert_tempfile.path }} | tar xz -C {{ helm_home_dir }}" no_log: true changed_when: false diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index a830f563d..5887ce3c8 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -52,7 +52,7 @@ - helm_version is version('v3.0.0', '<') # FIXME: https://github.com/helm/helm/issues/6374 -- name: Helm | Install/upgrade helm +- name: Helm | Install/upgrade helm # noqa 306 shell: > {{ bin_dir }}/helm init --tiller-namespace={{ tiller_namespace }} {% if helm_skip_refresh %} --skip-refresh{% endif %} @@ -78,7 +78,7 @@ environment: "{{ proxy_env }}" # FIXME: https://github.com/helm/helm/issues/4063 -- name: Helm | Force apply tiller overrides if necessary +- name: Helm | Force apply tiller overrides if necessary # noqa 306 shell: > {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace={{ tiller_namespace }} {% if helm_skip_refresh %} --skip-refresh{% endif %} @@ -108,7 +108,7 @@ - helm_version is version('v3.0.0', '>=') - helm_stable_repo_url is defined -- name: Make sure bash_completion.d folder exists +- name: Make sure bash_completion.d folder exists # noqa 503 file: name: "/etc/bash_completion.d/" state: directory @@ -116,7 +116,7 @@ - ((helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed)) - ansible_os_family in ["ClearLinux"] -- name: Helm | Set up bash 
completion +- name: Helm | Set up bash completion # noqa 503 shell: "umask 022 && {{ bin_dir }}/helm completion bash >/etc/bash_completion.d/helm.sh" when: - ((helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed)) diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml index 9528aa02d..37f086849 100644 --- a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: "calico upgrade complete" +- name: "calico upgrade complete" # noqa 305 shell: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml" when: - inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml index 347d1b4c2..e9de24b52 100644 --- a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml +++ b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Rotate Tokens | Get default token name +- name: Rotate Tokens | Get default token name # noqa 306 shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets -o custom-columns=name:{.metadata.name} --no-headers | grep -m1 default-token" register: default_token changed_when: false @@ -29,7 +29,7 @@ # FIXME(mattymo): Exclude built in secrets that were automatically rotated, # instead of filtering manually -- name: Rotate Tokens | Get all serviceaccount tokens to expire +- name: Rotate Tokens | Get all serviceaccount tokens to expire # noqa 306 shell: >- {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets --all-namespaces -o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}' diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml index bbb1ce0e0..2baeadf23 100644 --- a/roles/kubernetes/client/tasks/main.yml +++ b/roles/kubernetes/client/tasks/main.yml @@ -48,7 +48,7 @@ timeout: 180 # NOTE(mattymo): Please forgive this workaround -- name: Generate admin kubeconfig with external api endpoint +- name: Generate admin kubeconfig with external api endpoint # noqa 302 shell: >- mkdir -p {{ kube_config_dir }}/external_kubeconfig && {{ bin_dir }}/kubeadm diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml index bf2c26879..91bc35eb2 100644 --- a/roles/kubernetes/kubeadm/tasks/main.yml +++ b/roles/kubernetes/kubeadm/tasks/main.yml @@ -22,7 +22,7 @@ delegate_to: "{{ groups['kube-master'][0] }}" run_once: true -- name: Calculate kubeadm CA cert hash +- name: Calculate kubeadm CA cert hash # noqa 306 shell: openssl x509 -pubkey -in {{ kube_cert_dir }}/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //' register: kubeadm_ca_hash when: @@ -107,7 +107,7 @@ # FIXME(mattymo): Need to point to localhost, otherwise masters will all point # incorrectly to first master, creating SPoF. 
-- name: Update server field in kube-proxy kubeconfig +- name: Update server field in kube-proxy kubeconfig # noqa 306 shell: >- {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get configmap kube-proxy -n kube-system -o yaml | sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g' @@ -131,7 +131,7 @@ group: root mode: "0644" -- name: Restart all kube-proxy pods to ensure that they load the new configmap +- name: Restart all kube-proxy pods to ensure that they load the new configmap # noqa 305 shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0" run_once: true delegate_to: "{{ groups['kube-master']|first }}" @@ -157,7 +157,7 @@ # FIXME(jjo): need to post-remove kube-proxy until https://github.com/kubernetes/kubeadm/issues/776 # is fixed -- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services +- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services # noqa 305 shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy" run_once: true delegate_to: "{{ groups['kube-master']|first }}" diff --git a/roles/kubernetes/master/tasks/kubeadm-setup.yml b/roles/kubernetes/master/tasks/kubeadm-setup.yml index d739fbc8f..1363206f6 100644 --- a/roles/kubernetes/master/tasks/kubeadm-setup.yml +++ b/roles/kubernetes/master/tasks/kubeadm-setup.yml @@ -47,7 +47,7 @@ when: - old_apiserver_cert.stat.exists -- name: kubeadm | Forcefully delete old static pods +- name: kubeadm | Forcefully delete old static pods # noqa 306 shell: "docker ps -f name=k8s_{{ item }} -q | xargs --no-run-if-empty docker rm -f" with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] when: diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml index d6ce320ba..06c3eb525 100644 --- a/roles/kubernetes/master/tasks/pre-upgrade.yml +++ b/roles/kubernetes/master/tasks/pre-upgrade.yml @@ -8,7 +8,7 @@ register: kube_apiserver_manifest_replaced when: etcd_secret_changed|default(false) -- name: "Pre-upgrade | Delete master containers forcefully" +- name: "Pre-upgrade | Delete master containers forcefully" # noqa 306 503 shell: "docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f" with_items: - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index 21300e3ad..473aaf7eb 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -45,7 +45,7 @@ tags: - kube-proxy -- name: Verify if br_netfilter module exists +- name: Verify if br_netfilter module exists # noqa 305 shell: "modinfo br_netfilter" environment: PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH's conservative path management diff --git a/roles/kubernetes/node/tasks/pre_upgrade.yml b/roles/kubernetes/node/tasks/pre_upgrade.yml index 78a39567c..918edfac5 100644 --- a/roles/kubernetes/node/tasks/pre_upgrade.yml +++ b/roles/kubernetes/node/tasks/pre_upgrade.yml @@ -1,5 +1,5 @@ --- -- name: "Pre-upgrade | check if kubelet container exists" +- name: "Pre-upgrade | check if kubelet container exists" # noqa 306 shell: >- {% if container_manager in ['crio', 'docker'] %} docker ps -af name=kubelet | grep kubelet diff --git 
a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml index fd4cec362..097ba1d73 100644 --- a/roles/kubernetes/preinstall/handlers/main.yml +++ b/roles/kubernetes/preinstall/handlers/main.yml @@ -29,7 +29,7 @@ - Preinstall | reload kubelet when: is_fedora_coreos -- name: Preinstall | reload NetworkManager +- name: Preinstall | reload NetworkManager # noqa 303 command: systemctl restart NetworkManager.service when: is_fedora_coreos diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml index 987a4643a..599289d90 100644 --- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml +++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml @@ -158,7 +158,7 @@ when: - kube_network_plugin == 'calico' -- name: "Get current version of calico cluster version" +- name: "Get current version of calico cluster version" # noqa 306 shell: "{{ bin_dir }}/calicoctl.sh version | grep 'Cluster Version:' | awk '{ print $3}'" register: calico_version_on_server run_once: yes diff --git a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml index 0a4cd9ef3..a488f2fe0 100644 --- a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml +++ b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml @@ -24,14 +24,14 @@ set_fact: is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}" -- name: check resolvconf +- name: check resolvconf # noqa 305 shell: which resolvconf register: resolvconf failed_when: false changed_when: false check_mode: no -- name: check systemd-resolved +- name: check systemd-resolved # noqa 303 command: systemctl is-active systemd-resolved register: systemd_resolved_enabled failed_when: false diff --git a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml index 44b99a571..2c3546e22 100644 --- a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml +++ b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml @@ -1,5 +1,5 @@ --- -- name: Update package management cache (zypper) - SUSE +- name: Update package management cache (zypper) - SUSE # noqa 305 shell: zypper -n --gpg-auto-import-keys ref register: make_cache_output until: make_cache_output is succeeded diff --git a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml index b00c576ed..69aa65186 100644 --- a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml +++ b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml @@ -20,12 +20,12 @@ changed_when: False register: fs_type -- name: run growpart +- name: run growpart # noqa 503 command: growpart /dev/sda 1 when: growpart_needed.changed environment: LC_ALL: C -- name: run xfs_growfs +- name: run xfs_growfs # noqa 503 command: xfs_growfs /dev/sda1 when: growpart_needed.changed and 'XFS' in fs_type.stdout diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml index 9507a9323..c6f323b23 100644 --- a/roles/kubernetes/tokens/tasks/gen_tokens.yml +++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml @@ -34,7 +34,7 @@ delegate_to: "{{ groups['kube-master'][0] }}" when: gen_tokens|default(false) -- name: Gen_tokens | Get list of tokens from first master +- name: Gen_tokens | Get list of tokens from first master # noqa 305 shell: "(find {{ kube_token_dir }} -maxdepth 
1 -type f)" register: tokens_list check_mode: no @@ -42,7 +42,7 @@ run_once: true when: sync_tokens|default(false) -- name: Gen_tokens | Gather tokens +- name: Gen_tokens | Gather tokens # noqa 306 shell: "tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0" args: warn: false @@ -52,7 +52,7 @@ run_once: true when: sync_tokens|default(false) -- name: Gen_tokens | Copy tokens on masters +- name: Gen_tokens | Copy tokens on masters # noqa 306 shell: "echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /" when: - inventory_hostname in groups['kube-master'] diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml index 5b80cf1ac..4aa78f61e 100644 --- a/roles/network_plugin/calico/rr/tasks/main.yml +++ b/roles/network_plugin/calico/rr/tasks/main.yml @@ -2,7 +2,7 @@ - name: Calico-rr | Pre-upgrade tasks include_tasks: pre.yml -- name: Calico-rr | Fetch current node object +- name: Calico-rr | Fetch current node object # noqa 301 command: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }} -ojson" register: calico_rr_node until: calico_rr_node is succeeded @@ -15,12 +15,12 @@ {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp': { 'routeReflectorClusterID': cluster_id }}}, recursive=True) }} -- name: Calico-rr | Configure route reflector +- name: Calico-rr | Configure route reflector # noqa 301 305 shell: "{{ bin_dir }}/calicoctl.sh replace -f-" args: stdin: "{{ calico_rr_node_patched | to_json }}" -- name: Calico-rr | Set label for route reflector +- name: Calico-rr | Set label for route reflector # noqa 301 command: >- {{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} 'i-am-a-route-reflector=true' --overwrite diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml index 99888e216..dc92912fc 100644 --- a/roles/network_plugin/calico/tasks/check.yml +++ b/roles/network_plugin/calico/tasks/check.yml @@ -37,7 +37,7 @@ when: - "calico_vxlan_mode in ['Always', 'CrossSubnet']" -- name: "Get current version of calico cluster version" +- name: "Get current version of calico cluster version" # noqa 306 shell: "{{ bin_dir }}/calicoctl.sh version | grep 'Cluster Version:' | awk '{ print $3}'" register: calico_version_on_server run_once: yes diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml index 77aeba6ef..85a77f7e3 100644 --- a/roles/network_plugin/calico/tasks/install.yml +++ b/roles/network_plugin/calico/tasks/install.yml @@ -6,7 +6,7 @@ mode: 0755 remote_src: yes -- name: Calico | Check if host has NetworkManager +- name: Calico | Check if host has NetworkManager # noqa 303 command: systemctl show NetworkManager register: nm_check failed_when: false @@ -84,7 +84,7 @@ run_once: true when: calico_datastore == "etcd" -- name: Calico | Check if calico network pool has already been configured +- name: Calico | Check if calico network pool has already been configured # noqa 306 shell: > {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l register: calico_conf @@ -131,7 +131,7 @@ loop_control: label: "{{ item.item.file }}" -- name: Calico | Configure calico network pool (version < v3.3.0) +- name: Calico | Configure calico network pool (version < v3.3.0) # noqa 306 shell: > echo " { "kind": "IPPool", @@ -149,7 +149,7 @@ - 'calico_conf.stdout == "0"' - calico_version is version("v3.3.0", "<") -- name: Calico | Configure calico network pool 
(version >= v3.3.0) +- name: Calico | Configure calico network pool (version >= v3.3.0) # noqa 306 shell: > echo " { "kind": "IPPool", @@ -176,7 +176,7 @@ - inventory_hostname in groups['k8s-cluster'] run_once: yes -- name: Calico | Set global as_num +- name: Calico | Set global as_num # noqa 306 shell: > echo ' { "kind": "BGPConfiguration", @@ -192,7 +192,7 @@ when: - inventory_hostname == groups['kube-master'][0] -- name: Calico | Configure peering with router(s) at global scope +- name: Calico | Configure peering with router(s) at global scope # noqa 306 shell: > echo '{ "apiVersion": "projectcalico.org/v3", @@ -214,7 +214,7 @@ - inventory_hostname == groups['kube-master'][0] - peer_with_router|default(false) -- name: Calico | Configure peering with route reflectors at global scope +- name: Calico | Configure peering with route reflectors at global scope # noqa 306 shell: | echo '{ "apiVersion": "projectcalico.org/v3", @@ -236,7 +236,7 @@ - inventory_hostname == groups['kube-master'][0] - peer_with_calico_rr|default(false) -- name: Calico | Configure route reflectors to peer with each other +- name: Calico | Configure route reflectors to peer with each other # noqa 306 shell: > echo '{ "apiVersion": "projectcalico.org/v3", @@ -309,7 +309,7 @@ - inventory_hostname not in groups['kube-master'] - calico_datastore == "kdd" -- name: Calico | Configure node asNumber for per node peering +- name: Calico | Configure node asNumber for per node peering # noqa 306 shell: > echo '{ "apiVersion": "projectcalico.org/v3", @@ -333,7 +333,7 @@ - local_as is defined - groups['calico-rr'] | default([]) | length == 0 -- name: Calico | Configure peering with router(s) at node scope +- name: Calico | Configure peering with router(s) at node scope # noqa 306 shell: > echo '{ "apiVersion": "projectcalico.org/v3", diff --git a/roles/network_plugin/calico/tasks/pre.yml b/roles/network_plugin/calico/tasks/pre.yml index e798142f3..aaae21bcd 100644 --- a/roles/network_plugin/calico/tasks/pre.yml +++ b/roles/network_plugin/calico/tasks/pre.yml @@ -1,5 +1,5 @@ --- -- name: Calico | Get kubelet hostname +- name: Calico | Get kubelet hostname # noqa 306 shell: >- {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address' | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1 diff --git a/roles/network_plugin/calico/tasks/reset.yml b/roles/network_plugin/calico/tasks/reset.yml index 1cdab1262..013535072 100644 --- a/roles/network_plugin/calico/tasks/reset.yml +++ b/roles/network_plugin/calico/tasks/reset.yml @@ -8,11 +8,11 @@ command: ip link del dummy0 when: dummy0.stat.exists -- name: reset | get remaining routes set by bird +- name: reset | get remaining routes set by bird # noqa 301 command: ip route show proto bird register: bird_routes -- name: reset | remove remaining routes set by bird +- name: reset | remove remaining routes set by bird # noqa 301 command: "ip route del {{ bird_route }} proto bird" with_items: "{{ bird_routes.stdout_lines }}" loop_control: diff --git a/roles/network_plugin/calico/tasks/upgrade.yml b/roles/network_plugin/calico/tasks/upgrade.yml index a4b7cffd6..0dceac840 100644 --- a/roles/network_plugin/calico/tasks/upgrade.yml +++ b/roles/network_plugin/calico/tasks/upgrade.yml @@ -16,11 +16,11 @@ - "etcdv2" - "etcdv3" -- name: "Tests data migration (dry-run)" +- name: "Tests data migration (dry-run)" # noqa 301 305 shell: "{{ bin_dir }}/calico-upgrade dry-run --output-dir=/tmp --apiconfigv1 
/etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml" register: calico_upgrade_test_data failed_when: '"Successfully" not in calico_upgrade_test_data.stdout' -- name: "If test migration is success continue with calico data real migration" +- name: "If test migration is success continue with calico data real migration" # noqa 301 305 shell: "{{ bin_dir }}/calico-upgrade start --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml --output-dir=/tmp/calico_upgrade" register: calico_upgrade_migration_data diff --git a/roles/network_plugin/contiv/tasks/pre-reset.yml b/roles/network_plugin/contiv/tasks/pre-reset.yml index a811d5921..f7e66f01f 100644 --- a/roles/network_plugin/contiv/tasks/pre-reset.yml +++ b/roles/network_plugin/contiv/tasks/pre-reset.yml @@ -21,7 +21,7 @@ - contiv_kubectl.stat.exists - inventory_hostname == groups['kube-master'][0] -- name: reset | Copy contiv temporary cleanup script +- name: reset | Copy contiv temporary cleanup script # noqa 404 copy: src: ../files/contiv-cleanup.sh # Not in role_path so we must trick... dest: /opt/cni/bin/cleanup @@ -31,7 +31,7 @@ when: - contiv_kubectl.stat.exists -- name: reset | Lay down contiv cleanup template +- name: reset | Lay down contiv cleanup template # noqa 404 template: src: ../templates/contiv-cleanup.yml.j2 # Not in role_path so we must trick... dest: "{{ kube_config_dir }}/contiv-cleanup.yml" # kube_config_dir is used here as contiv_config_dir is not necessarily set at reset diff --git a/roles/network_plugin/kube-ovn/tasks/main.yml b/roles/network_plugin/kube-ovn/tasks/main.yml index 308b1c625..b254dd997 100644 --- a/roles/network_plugin/kube-ovn/tasks/main.yml +++ b/roles/network_plugin/kube-ovn/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Kube-OVN | Label ovn-db node +- name: Kube-OVN | Label ovn-db node # noqa 305 shell: >- {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} kube-ovn/role=master when: diff --git a/roles/network_plugin/macvlan/tasks/main.yml b/roles/network_plugin/macvlan/tasks/main.yml index 751c34716..3608a617f 100644 --- a/roles/network_plugin/macvlan/tasks/main.yml +++ b/roles/network_plugin/macvlan/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Macvlan | Retrieve Pod Cidr +- name: Macvlan | Retrieve Pod Cidr # noqa 301 command: "{{ bin_dir }}/kubectl get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'" register: node_pod_cidr_cmd delegate_to: "{{ groups['kube-master'][0] }}" @@ -8,7 +8,7 @@ set_fact: node_pod_cidr={{ node_pod_cidr_cmd.stdout }} -- name: Macvlan | Retrieve default gateway network interface +- name: Macvlan | Retrieve default gateway network interface # noqa 301 become: false raw: ip -4 route list 0/0 | sed 's/.*dev \([[:alnum:]]*\).*/\1/' register: node_default_gateway_interface_cmd diff --git a/roles/recover_control_plane/etcd/tasks/main.yml b/roles/recover_control_plane/etcd/tasks/main.yml index 64cac81da..55874d543 100644 --- a/roles/recover_control_plane/etcd/tasks/main.yml +++ b/roles/recover_control_plane/etcd/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Get etcd endpoint health +- name: Get etcd endpoint health # noqa 305 shell: "{{ bin_dir }}/etcdctl endpoint health" register: etcd_endpoint_health ignore_errors: true @@ -57,7 +57,7 @@ - groups['broken_etcd'] - "item.rc != 0 and not 'No such file or directory' in item.stderr" -- name: Get etcd cluster members +- name: Get etcd cluster members # noqa 305 shell: "{{ bin_dir }}/etcdctl member list" register: member_list 
changed_when: false @@ -73,7 +73,7 @@ - not healthy - has_quorum -- name: Remove broken cluster members +- name: Remove broken cluster members # noqa 305 shell: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}" environment: ETCDCTL_API: 3 diff --git a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml index dc1011805..ff2c726fd 100644 --- a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml +++ b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml @@ -1,5 +1,5 @@ --- -- name: Save etcd snapshot +- name: Save etcd snapshot # noqa 305 shell: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db" environment: - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" @@ -25,7 +25,7 @@ path: "{{ etcd_data_dir }}" state: absent -- name: Restore etcd snapshot +- name: Restore etcd snapshot # noqa 301 305 shell: "{{ bin_dir }}/etcdctl snapshot restore /tmp/snapshot.db --name {{ etcd_member_name }} --initial-cluster {{ etcd_member_name }}={{ etcd_peer_url }} --initial-cluster-token k8s_etcd --initial-advertise-peer-urls {{ etcd_peer_url }} --data-dir {{ etcd_data_dir }}" environment: - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" diff --git a/roles/recover_control_plane/master/tasks/main.yml b/roles/recover_control_plane/master/tasks/main.yml index 71a094168..9cc7c33d6 100644 --- a/roles/recover_control_plane/master/tasks/main.yml +++ b/roles/recover_control_plane/master/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Wait for apiserver +- name: Wait for apiserver # noqa 305 shell: "{{ bin_dir }}/kubectl get nodes" environment: - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config" @@ -10,7 +10,7 @@ changed_when: false when: groups['broken_kube-master'] -- name: Delete broken kube-master nodes from cluster +- name: Delete broken kube-master nodes from cluster # noqa 305 shell: "{{ bin_dir }}/kubectl delete node {{ item }}" environment: - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config" diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml index 37aac0df2..c4660ef87 100644 --- a/roles/remove-node/post-remove/tasks/main.yml +++ b/roles/remove-node/post-remove/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Delete node +- name: Delete node # noqa 301 command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}" delegate_to: "{{ groups['kube-master']|first }}" ignore_errors: yes \ No newline at end of file diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml index f287aa3dd..32421c1a3 100644 --- a/roles/remove-node/pre-remove/tasks/main.yml +++ b/roles/remove-node/pre-remove/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: cordon-node | Mark all nodes as unschedulable before drain +- name: cordon-node | Mark all nodes as unschedulable before drain # noqa 301 command: >- {{ bin_dir }}/kubectl cordon {{ hostvars[item]['kube_override_hostname']|default(item) }} with_items: @@ -9,7 +9,7 @@ run_once: true ignore_errors: yes -- name: remove-node | Drain node except daemonsets resource +- name: remove-node | Drain node except daemonsets resource # noqa 301 command: >- {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf drain --force diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml b/roles/remove-node/remove-etcd-node/tasks/main.yml index 
ffd95a4c8..21a026606 100644
--- a/roles/remove-node/remove-etcd-node/tasks/main.yml
+++ b/roles/remove-node/remove-etcd-node/tasks/main.yml
@@ -34,7 +34,7 @@
   delegate_to: "{{ groups['etcd']|first }}"
   when: inventory_hostname in groups['etcd']

-- name: Remove etcd member from cluster
+- name: Remove etcd member from cluster # noqa 305
   shell: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
   register: etcd_member_in_cluster
   changed_when: false
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 5fd98fd6f..4a9b13df9 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -41,12 +41,12 @@
   tags:
     - docker

-- name: reset | systemctl daemon-reload
+- name: reset | systemctl daemon-reload # noqa 503
   systemd:
     daemon_reload: true
   when: services_removed.changed or docker_dropins_removed.changed

-- name: reset | remove all containers
+- name: reset | remove all containers # noqa 306
   shell: "{{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"
   register: remove_all_containers
   retries: 4
@@ -56,7 +56,7 @@
   tags:
     - docker

-- name: reset | restart docker if needed
+- name: reset | restart docker if needed # noqa 503
   service:
     name: docker
     state: restarted
@@ -64,7 +64,7 @@
   tags:
     - docker

-- name: reset | stop all cri containers
+- name: reset | stop all cri containers # noqa 306
   shell: "crictl ps -aq | xargs -r crictl -t 60s stop"
   register: remove_all_cri_containers
   retries: 5
@@ -75,7 +75,7 @@
     - containerd
   when: container_manager in ["crio", "containerd"]

-- name: reset | remove all cri containers
+- name: reset | remove all cri containers # noqa 306
   shell: "crictl ps -aq | xargs -r crictl -t 60s rm"
   register: remove_all_cri_containers
   retries: 5
@@ -86,7 +86,7 @@
     - containerd
   when: container_manager in ["crio", "containerd"] and deploy_container_engine|default(true)

-- name: reset | stop all cri pods
+- name: reset | stop all cri pods # noqa 306
   shell: "crictl pods -q | xargs -r crictl -t 60s stopp"
   register: remove_all_cri_containers
   retries: 5
@@ -97,7 +97,7 @@
     - containerd
   when: container_manager in ["crio", "containerd"]

-- name: reset | remove all cri pods
+- name: reset | remove all cri pods # noqa 306
   shell: "crictl pods -q | xargs -r crictl -t 60s rmp"
   register: remove_all_cri_containers
   retries: 5
@@ -130,7 +130,7 @@
   tags:
     - services

-- name: reset | gather mounted kubelet dirs
+- name: reset | gather mounted kubelet dirs # noqa 306 301
   shell: mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
   args:
     warn: false
@@ -139,7 +139,7 @@
   tags:
     - mounts

-- name: reset | unmount kubelet dirs
+- name: reset | unmount kubelet dirs # noqa 301
   command: umount -f {{ item }}
   with_items: "{{ mounted_dirs.stdout_lines }}"
   register: umount_dir
@@ -161,7 +161,7 @@
   tags:
     - iptables

-- name: Clear IPVS virtual server table
+- name: Clear IPVS virtual server table # noqa 305
   shell: "ipvsadm -C"
   when:
     - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s-cluster']
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index 7524e3490..8fd3e5c03 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -112,7 +112,7 @@
           {%- endfor %}
       when: "'etcd' in groups"

-    - name: Storing commands output
+    - name: Storing commands output # noqa 306
       shell: "{{ item.cmd }} 2>&1 | tee {{ item.name }}"
       failed_when: false
       with_items: "{{ commands }}"
diff --git a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
index aae85e409..08f26694a 100644
--- a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
+++ b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
@@ -34,7 +34,7 @@
   when:
     - item.value.converted|bool

-- name: Resize images
+- name: Resize images # noqa 301
   command: qemu-img resize {{ images_dir }}/{{ item.key }}.qcow2 +8G
   with_dict:
     - "{{ images }}"
@@ -45,15 +45,15 @@
     src: Dockerfile
     dest: "{{ images_dir }}/Dockerfile"

-- name: Create docker images for each OS
+- name: Create docker images for each OS # noqa 301
   command: docker build -t {{ registry }}/vm-{{ item.key }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
   with_dict:
     - "{{ images }}"

-- name: docker login
+- name: docker login # noqa 301
   command: docker login -u="{{ docker_user }}" -p="{{ docker_password }}" "{{ docker_host }}"

-- name: docker push image
+- name: docker push image # noqa 301
   command: docker push {{ registry }}/vm-{{ item.key }}:latest
   with_dict:
     - "{{ images }}"
diff --git a/tests/cloud_playbooks/create-aws.yml b/tests/cloud_playbooks/create-aws.yml
index eb33d9838..52317e794 100644
--- a/tests/cloud_playbooks/create-aws.yml
+++ b/tests/cloud_playbooks/create-aws.yml
@@ -18,7 +18,7 @@
         instance_tags: "{{ aws.tags }}"
       register: ec2

-    - name: Template the inventory
+    - name: Template the inventory # noqa 404
       template:
         src: ../templates/inventory-aws.j2
         dest: "{{ inventory_path }}"
diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml
index 37fbafbd6..5d41f714d 100644
--- a/tests/cloud_playbooks/create-do.yml
+++ b/tests/cloud_playbooks/create-do.yml
@@ -86,7 +86,7 @@
         msg: "{{ droplets }}, {{ inventory_path }}"
       when: state == 'present'

-    - name: Template the inventory
+    - name: Template the inventory # noqa 404
       template:
         src: ../templates/inventory-do.j2
         dest: "{{ inventory_path }}"
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index 266481079..57e5d1d41 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -49,7 +49,7 @@
       add_host: hostname={{ item.public_ip }} groupname="waitfor_hosts"
       with_items: '{{ gce.instance_data }}'

-    - name: Template the inventory
+    - name: Template the inventory # noqa 404
       template:
         src: ../templates/inventory-gce.j2
         dest: "{{ inventory_path }}"
@@ -60,7 +60,7 @@
         state: directory
       when: mode in ['scale', 'separate-scale', 'ha-scale']

-    - name: Template fake hosts group vars
+    - name: Template fake hosts group vars # noqa 404
       template:
         src: ../templates/fake_hosts.yml.j2
         dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"
diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
index 939d432a6..53edd0968 100644
--- a/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
+++ b/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
@@ -29,7 +29,7 @@
   loop_control:
     index_var: vm_id

-- name: Wait for vms to have ipaddress assigned
+- name: Wait for vms to have ipaddress assigned # noqa 301 306
   shell: "kubectl get vmis -n {{ test_name }} instance-{{ vm_id }} -o json | jq '.status.interfaces[].ipAddress' | tr -d '\"'"
   register: vm_ips
   loop: "{{ range(1, vm_count|int + 1, 1) | list }}"
diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
index dc66e2db7..a37d4ed14 100644
--- a/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
+++ b/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
@@ -16,7 +16,7 @@
     state: absent
     name: "{{ test_name }}"

-- name: Wait for namespace {{ test_name }} to be fully deleted
+- name: Wait for namespace {{ test_name }} to be fully deleted # noqa 305
   shell: kubectl get ns {{ test_name }}
   register: delete_namespace
   failed_when:
diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml
index f1e3cbaca..6e6457ba5 100644
--- a/tests/cloud_playbooks/upload-logs-gcs.yml
+++ b/tests/cloud_playbooks/upload-logs-gcs.yml
@@ -7,7 +7,7 @@
     expire_days: 2

   tasks:
-    - name: Generate uniq bucket name prefix
+    - name: Generate uniq bucket name prefix # noqa 301
       raw: date +%Y%m%d
       register: out

@@ -52,7 +52,7 @@
       no_log: True
       failed_when: false

-    - name: Apply the lifecycle rules
+    - name: Apply the lifecycle rules # noqa 301
       command: "{{ dir }}/google-cloud-sdk/bin/gsutil lifecycle set {{ dir }}/gcs_life.json gs://{{ test_name }}"
       environment:
         BOTO_CONFIG: "{{ dir }}/.boto"
diff --git a/tests/testcases/015_check-nodes-ready.yml b/tests/testcases/015_check-nodes-ready.yml
index be8370cc3..f2cfd2eba 100644
--- a/tests/testcases/015_check-nodes-ready.yml
+++ b/tests/testcases/015_check-nodes-ready.yml
@@ -15,7 +15,7 @@
     - import_role:
         name: cluster-dump

-    - name: Check kubectl output
+    - name: Check kubectl output # noqa 301 305
       shell: "{{ bin_dir }}/kubectl get nodes"
       register: get_nodes
       no_log: true
@@ -23,7 +23,7 @@
     - debug:
         msg: "{{ get_nodes.stdout.split('\n') }}"

-    - name: Check that all nodes are running and ready
+    - name: Check that all nodes are running and ready # noqa 301 305
       shell: "{{ bin_dir }}/kubectl get nodes --no-headers -o yaml"
       register: get_nodes_yaml
       until:
diff --git a/tests/testcases/020_check-pods-running.yml b/tests/testcases/020_check-pods-running.yml
index 9679be5fc..8cf95f114 100644
--- a/tests/testcases/020_check-pods-running.yml
+++ b/tests/testcases/020_check-pods-running.yml
@@ -15,7 +15,7 @@
     - import_role:
         name: cluster-dump

-    - name: Check kubectl output
+    - name: Check kubectl output # noqa 301 305
       shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
       register: get_pods
       no_log: true
@@ -23,7 +23,7 @@
     - debug:
         msg: "{{ get_pods.stdout.split('\n') }}"

-    - name: Check that all pods are running and ready
+    - name: Check that all pods are running and ready # noqa 301 305
       shell: "{{ bin_dir }}/kubectl get pods --all-namespaces --no-headers -o yaml"
       register: run_pods_log
       until:
@@ -36,7 +36,7 @@
       failed_when: false
       no_log: true

-    - name: Check kubectl output
+    - name: Check kubectl output # noqa 301 305
       shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
       register: get_pods
       no_log: true
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index bee470ef7..8887e38fe 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -15,10 +15,10 @@
         bin_dir: "/usr/local/bin"
       when: not ansible_os_family in ["CoreOS", "Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk"]

-    - name: Create test namespace
+    - name: Create test namespace # noqa 301 305
       shell: "{{ bin_dir }}/kubectl create namespace test"

-    - name: Run 2 busybox pods in test ns
+    - name: Run 2 busybox pods in test ns # noqa 301 305
       shell: "{{ bin_dir }}/kubectl run {{ item }} --image={{ test_image_repo }}:{{ test_image_tag }} --namespace test --command -- tail -f /dev/null"
       loop:
         - busybox1
@@ -27,7 +27,7 @@
     - import_role:
         name: cluster-dump

-    - name: Check that all pods are running and ready
+    - name: Check that all pods are running and ready # noqa 301 305
       shell: "{{ bin_dir }}/kubectl get pods --namespace test --no-headers -o yaml"
       register: run_pods_log
       until:
@@ -40,7 +40,7 @@
       failed_when: false
       no_log: true

-    - name: Get pod names
+    - name: Get pod names # noqa 301 305
       shell: "{{ bin_dir }}/kubectl get pods -n test -o json"
       register: pods
       no_log: true
@@ -49,19 +49,19 @@
         msg: "{{ pods.stdout.split('\n') }}"
       failed_when: not run_pods_log is success

-    - name: Get hostnet pods
+    - name: Get hostnet pods # noqa 301
       command: "{{ bin_dir }}/kubectl get pods -n test -o jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
       register: hostnet_pods
       no_log: true

-    - name: Get running pods
+    - name: Get running pods # noqa 301
       command: "{{ bin_dir }}/kubectl get pods -n test -o jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
       register: running_pods
       no_log: true

-    - name: Check kubectl output
+    - name: Check kubectl output # noqa 301 305
       shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
       register: get_pods
       no_log: true
@@ -89,7 +89,7 @@
         - item in pods_running
       with_items: "{{ pod_ips }}"

-    - name: Ping between pods is working
+    - name: Ping between pods is working # noqa 305
       shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
       when:
         - not item[0] in pods_hostnet
@@ -98,7 +98,7 @@
         - "{{ pod_names }}"
         - "{{ pod_ips }}"

-    - name: Ping between hostnet pods is working
+    - name: Ping between hostnet pods is working # noqa 305
       shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
       when:
         - item[0] in pods_hostnet
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index e6ea13a24..541235255 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -1,7 +1,7 @@
 ---
 - hosts: kube-node
   tasks:
-    - name: Test tunl0 routes
+    - name: Test tunl0 routes # noqa 306
       shell: "! /sbin/ip ro | grep '/26 via' | grep -v tunl0"
       when:
         - (ipip|default(true) or cloud_provider is defined)
@@ -14,7 +14,7 @@
     netchecker_port: 31081

   tasks:
-    - name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282)
+    - name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282) # noqa 305
       shell: "ethtool --offload flannel.1 rx off tx off"
       ignore_errors: true
       when:
@@ -33,7 +33,7 @@
     - import_role:
         name: cluster-dump

-    - name: Wait for netchecker server
+    - name: Wait for netchecker server # noqa 306
       shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep ^netchecker-server"
       register: ncs_pod
       until: ncs_pod.stdout.find('Running') != -1
@@ -41,7 +41,7 @@
       delay: 10
       when: inventory_hostname == groups['kube-master'][0]

-    - name: Wait for netchecker agents
+    - name: Wait for netchecker agents # noqa 306
       shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep '^netchecker-agent-.*Running'"
       register: nca_pod
       until: nca_pod.stdout_lines|length >= groups['k8s-cluster']|intersect(ansible_play_hosts)|length * 2
@@ -214,7 +214,7 @@
         - inventory_hostname == groups['kube-master'][0]
         - kube_network_plugin_multus|default(false)|bool

-    - name: Check secondary macvlan interface
+    - name: Check secondary macvlan interface # noqa 305
       shell: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
       register: output
       until: output.rc == 0
diff --git a/tests/testcases/roles/cluster-dump/tasks/main.yml b/tests/testcases/roles/cluster-dump/tasks/main.yml
index e1d5d35a6..bae50b87d 100644
--- a/tests/testcases/roles/cluster-dump/tasks/main.yml
+++ b/tests/testcases/roles/cluster-dump/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: Generate dump folder
+- name: Generate dump folder # noqa 305
   shell: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump"
   no_log: true
   when: inventory_hostname in groups['kube-master']