Fix ansible-lint E305 (#6459)

Maxime Guyot authored on 2020-07-28 10:39:08 +02:00, committed by GitHub
parent 8bd3b50e31
commit 214e08f8c9
22 changed files with 59 additions and 61 deletions
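
ansible-lint rule 305 ("Use shell only when shell functionality is required") flags tasks that use the shell module where the plain command module would do. Most hunks in this commit therefore swap shell: for command: and drop the now-unneeded # noqa 305 tag; tasks that genuinely need a shell keep it and gain a written justification next to the tag. A minimal sketch of the straight conversion, reusing the crictl task that appears in one of the hunks below:

# Before: flagged by ansible-lint 305, silenced with a bare noqa tag
- name: Get crictl completion # noqa 305
  shell: "{{ bin_dir }}/crictl completion"

# After: no pipes or redirects are involved, so command is enough and the tag goes away
- name: Get crictl completion
  command: "{{ bin_dir }}/crictl completion"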


@@ -34,9 +34,8 @@
   tags:
     - facts
-- name: disable unified_cgroup_hierarchy in Fedora 31+ # noqa 305
-  shell:
-    cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
+- name: disable unified_cgroup_hierarchy in Fedora 31+
+  command: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
   when:
     - ansible_distribution == "Fedora"
     - (ansible_distribution_major_version | int) >= 31


@ -21,8 +21,8 @@
group: no group: no
delegate_to: "{{ inventory_hostname }}" delegate_to: "{{ inventory_hostname }}"
- name: Get crictl completion # noqa 305 - name: Get crictl completion
shell: "{{ bin_dir }}/crictl completion" command: "{{ bin_dir }}/crictl completion"
changed_when: False changed_when: False
register: cri_completion register: cri_completion


@@ -47,9 +47,8 @@
   tags:
     - facts
-- name: disable unified_cgroup_hierarchy in Fedora 31+ # noqa 305
-  shell:
-    cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
+- name: disable unified_cgroup_hierarchy in Fedora 31+
+  command: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
   when:
     - ansible_distribution == "Fedora"
     - (ansible_distribution_major_version | int) >= 31


@@ -4,7 +4,7 @@
 # the template, just replace all instances of {{ `{{` }} with {{ and {{ '}}' }} with }}.
 # It will output something like the following:
 # nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
-- name: check_pull_required | Generate a list of information about the images on a node # noqa 305
+- name: check_pull_required | Generate a list of information about the images on a node # noqa 305 image_info_command contains a pipe, therefore requiring shell
   shell: "{{ image_info_command }}"
   no_log: true
   register: docker_images
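
Where the expanded command really does need a shell, as with {{ image_info_command }} above, which contains a pipe, the task keeps shell: and the # noqa 305 tag stays, now followed by a short justification so the suppression is self-documenting. A minimal sketch of that pattern, mirroring the hunk above (the task name is illustrative):

# shell is kept on purpose: the expanded variable contains a pipe
- name: Example | Inspect images on a node # noqa 305 image_info_command contains a pipe, therefore requiring shell
  shell: "{{ image_info_command }}"
  no_log: true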


@@ -63,8 +63,8 @@
     - pull_required or download_run_once
     - not image_is_cached
-- name: download_container | Save and compress image # noqa 305
-  shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}"
+- name: download_container | Save and compress image
+  shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa 305 image_save_command_on_localhost contains a pipe, therefore requires shell
   delegate_to: "{{ download_delegate }}"
   delegate_facts: no
   register: container_save_status

@@ -103,8 +103,8 @@
     - pull_required
     - download_force_cache
-- name: download_container | Load image into docker # noqa 305
-  shell: "{{ image_load_command }}"
+- name: download_container | Load image into docker
+  shell: "{{ image_load_command }}" # noqa 305 image_load_command uses pipes, therefore requires shell
   register: container_load_status
   failed_when: container_load_status is failed
   when:


@@ -32,8 +32,8 @@
     - localhost
     - asserts
-- name: prep_download | On localhost, check if user has access to docker without using sudo # noqa 305
-  shell: "{{ image_info_command_on_localhost }}"
+- name: prep_download | On localhost, check if user has access to docker without using sudo
+  shell: "{{ image_info_command_on_localhost }}" # noqa 305 image_info_command_on_localhost contains pipe, therefore requires shell
   delegate_to: localhost
   connection: local
   run_once: true

@@ -68,8 +68,8 @@
     - localhost
     - asserts
-- name: prep_download | Register docker images info # noqa 305
-  shell: "{{ image_info_command }}"
+- name: prep_download | Register docker images info
+  shell: "{{ image_info_command }}" # noqa 305 image_info_command contains pipe therefore requires shell
   no_log: true
   register: docker_images
   failed_when: false


@@ -1,6 +1,6 @@
 ---
-- name: "calico upgrade complete" # noqa 305
-  shell: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
+- name: "calico upgrade complete"
+  command: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
   when:
     - inventory_hostname == groups['kube-master'][0]
     - calico_upgrade_enabled|default(True)


@@ -131,8 +131,8 @@
     group: root
     mode: "0644"
-- name: Restart all kube-proxy pods to ensure that they load the new configmap # noqa 305
-  shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
+- name: Restart all kube-proxy pods to ensure that they load the new configmap
+  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
   run_once: true
   delegate_to: "{{ groups['kube-master']|first }}"
   delegate_facts: false

@@ -157,8 +157,8 @@
 # FIXME(jjo): need to post-remove kube-proxy until https://github.com/kubernetes/kubeadm/issues/776
 # is fixed
-- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services # noqa 305
-  shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
+- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services
+  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
   run_once: true
   delegate_to: "{{ groups['kube-master']|first }}"
   when:


@@ -45,8 +45,8 @@
   tags:
     - kube-proxy
-- name: Verify if br_netfilter module exists # noqa 305
-  shell: "modinfo br_netfilter"
+- name: Verify if br_netfilter module exists
+  command: "modinfo br_netfilter"
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH's conservative path management
   register: modinfo_br_netfilter


@@ -24,8 +24,8 @@
   set_fact:
     is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}"
-- name: check resolvconf # noqa 305
-  shell: which resolvconf
+- name: check resolvconf
+  command: which resolvconf
   register: resolvconf
   failed_when: false
   changed_when: false


@@ -1,6 +1,6 @@
 ---
-- name: Update package management cache (zypper) - SUSE # noqa 305
-  shell: zypper -n --gpg-auto-import-keys ref
+- name: Update package management cache (zypper) - SUSE
+  command: zypper -n --gpg-auto-import-keys ref
   register: make_cache_output
   until: make_cache_output is succeeded
   retries: 4


@@ -34,8 +34,8 @@
   delegate_to: "{{ groups['kube-master'][0] }}"
   when: gen_tokens|default(false)
-- name: Gen_tokens | Get list of tokens from first master # noqa 305
-  shell: "(find {{ kube_token_dir }} -maxdepth 1 -type f)"
+- name: Gen_tokens | Get list of tokens from first master
+  command: "find {{ kube_token_dir }} -maxdepth 1 -type f"
   register: tokens_list
   check_mode: no
   delegate_to: "{{ groups['kube-master'][0] }}"


@@ -1,6 +1,6 @@
 ---
-- name: Kube-OVN | Label ovn-db node # noqa 305
-  shell: >-
+- name: Kube-OVN | Label ovn-db node
+  command: >-
     {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} kube-ovn/role=master
   when:
     - inventory_hostname == groups['kube-master'][0]


@@ -1,6 +1,6 @@
 ---
-- name: Get etcd endpoint health # noqa 305
-  shell: "{{ bin_dir }}/etcdctl endpoint health"
+- name: Get etcd endpoint health
+  command: "{{ bin_dir }}/etcdctl endpoint health"
   register: etcd_endpoint_health
   ignore_errors: true
   changed_when: false

@@ -57,8 +57,8 @@
     - groups['broken_etcd']
     - "item.rc != 0 and not 'No such file or directory' in item.stderr"
-- name: Get etcd cluster members # noqa 305
-  shell: "{{ bin_dir }}/etcdctl member list"
+- name: Get etcd cluster members
+  command: "{{ bin_dir }}/etcdctl member list"
   register: member_list
   changed_when: false
   check_mode: no

@@ -73,8 +73,8 @@
     - not healthy
     - has_quorum
-- name: Remove broken cluster members # noqa 305
-  shell: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}"
+- name: Remove broken cluster members
+  command: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}"
   environment:
     ETCDCTL_API: 3
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"


@@ -1,6 +1,6 @@
 ---
-- name: Save etcd snapshot # noqa 305
-  shell: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db"
+- name: Save etcd snapshot
+  command: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db"
   environment:
     - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     - ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"


@@ -1,6 +1,6 @@
 ---
-- name: Wait for apiserver # noqa 305
-  shell: "{{ bin_dir }}/kubectl get nodes"
+- name: Wait for apiserver
+  command: "{{ bin_dir }}/kubectl get nodes"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   register: apiserver_is_ready

@@ -10,8 +10,8 @@
   changed_when: false
   when: groups['broken_kube-master']
-- name: Delete broken kube-master nodes from cluster # noqa 305
-  shell: "{{ bin_dir }}/kubectl delete node {{ item }}"
+- name: Delete broken kube-master nodes from cluster
+  command: "{{ bin_dir }}/kubectl delete node {{ item }}"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   with_items: "{{ groups['broken_kube-master'] }}"


@@ -34,8 +34,8 @@
   delegate_to: "{{ groups['etcd']|first }}"
   when: inventory_hostname in groups['etcd']
-- name: Remove etcd member from cluster # noqa 305
-  shell: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
+- name: Remove etcd member from cluster
+  command: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
   register: etcd_member_in_cluster
   changed_when: false
   check_mode: no


@@ -161,8 +161,8 @@
   tags:
     - iptables
-- name: Clear IPVS virtual server table # noqa 305
-  shell: "ipvsadm -C"
+- name: Clear IPVS virtual server table
+  command: "ipvsadm -C"
   when:
     - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s-cluster']


@@ -16,8 +16,8 @@
     state: absent
     name: "{{ test_name }}"
-- name: Wait for namespace {{ test_name }} to be fully deleted # noqa 305
-  shell: kubectl get ns {{ test_name }}
+- name: Wait for namespace {{ test_name }} to be fully deleted
+  command: kubectl get ns {{ test_name }}
   register: delete_namespace
   failed_when:
     - delete_namespace.rc == 0


@@ -89,8 +89,8 @@
     - item in pods_running
   with_items: "{{ pod_ips }}"
-- name: Ping between pods is working # noqa 305
-  shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
+- name: Ping between pods is working
+  command: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
   when:
     - not item[0] in pods_hostnet
     - not item[1] in pods_hostnet

@@ -98,8 +98,8 @@
     - "{{ pod_names }}"
     - "{{ pod_ips }}"
-- name: Ping between hostnet pods is working # noqa 305
-  shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
+- name: Ping between hostnet pods is working
+  command: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
   when:
     - item[0] in pods_hostnet
     - item[1] in pods_hostnet


@@ -14,8 +14,8 @@
     netchecker_port: 31081
   tasks:
-    - name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282) # noqa 305
-      shell: "ethtool --offload flannel.1 rx off tx off"
+    - name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282)
+      command: "ethtool --offload flannel.1 rx off tx off"
       ignore_errors: true
       when:
         - kube_network_plugin|default('calico') == 'flannel'

@@ -214,8 +214,8 @@
         - inventory_hostname == groups['kube-master'][0]
         - kube_network_plugin_multus|default(false)|bool
-    - name: Check secondary macvlan interface # noqa 305
-      shell: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
+    - name: Check secondary macvlan interface
+      command: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
       register: output
       until: output.rc == 0
       retries: 90


@@ -1,6 +1,6 @@
 ---
-- name: Generate dump folder # noqa 305
-  shell: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump"
+- name: Generate dump folder
+  command: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump"
   no_log: true
   when: inventory_hostname in groups['kube-master']