Fix ansible-lint E305 (#6459)
commit 214e08f8c9 (parent 8bd3b50e31)
22 changed files with 59 additions and 61 deletions
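ansible-lint rule 305 (use shell only when shell functionality is required) flags tasks that use the shell module where the plain command module would do. This commit applies two fixes: tasks that run a single binary are switched from shell to command, and tasks whose command string genuinely needs a shell (it contains a pipe) keep shell but carry an inline "# noqa 305" comment that states the reason, instead of a bare suppression on the task name. A minimal sketch of the two resulting forms; the tool path and task names below are hypothetical, not taken from this repository:

- name: Run a plain binary, no shell features needed  # illustrative task only
  command: "/usr/local/bin/some-tool --version"
  register: tool_version
  changed_when: false

- name: Run a piped command, shell is still required  # illustrative task only
  shell: "/usr/local/bin/some-tool list | grep enabled"  # noqa 305 the pipe requires shell
  register: enabled_items
  changed_when: false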
@@ -34,9 +34,8 @@
   tags:
     - facts

-- name: disable unified_cgroup_hierarchy in Fedora 31+ # noqa 305
-  shell:
-    cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
+- name: disable unified_cgroup_hierarchy in Fedora 31+
+  command: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
   when:
     - ansible_distribution == "Fedora"
     - (ansible_distribution_major_version | int) >= 31
@@ -21,8 +21,8 @@
     group: no
   delegate_to: "{{ inventory_hostname }}"

-- name: Get crictl completion # noqa 305
-  shell: "{{ bin_dir }}/crictl completion"
+- name: Get crictl completion
+  command: "{{ bin_dir }}/crictl completion"
   changed_when: False
   register: cri_completion

@@ -47,9 +47,8 @@
   tags:
     - facts

-- name: disable unified_cgroup_hierarchy in Fedora 31+ # noqa 305
-  shell:
-    cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
+- name: disable unified_cgroup_hierarchy in Fedora 31+
+  command: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
   when:
     - ansible_distribution == "Fedora"
     - (ansible_distribution_major_version | int) >= 31
@@ -4,7 +4,7 @@
 # the template, just replace all instances of {{ `{{` }} with {{ and {{ '}}' }} with }}.
 # It will output something like the following:
 # nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
-- name: check_pull_required | Generate a list of information about the images on a node # noqa 305
+- name: check_pull_required | Generate a list of information about the images on a node # noqa 305 image_info_command contains a pipe, therefore requiring shell
   shell: "{{ image_info_command }}"
   no_log: true
   register: docker_images
@@ -63,8 +63,8 @@
     - pull_required or download_run_once
     - not image_is_cached

-- name: download_container | Save and compress image # noqa 305
-  shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}"
+- name: download_container | Save and compress image
+  shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa 305 image_save_command_on_localhost contains a pipe, therefore requires shell
   delegate_to: "{{ download_delegate }}"
   delegate_facts: no
   register: container_save_status
@@ -103,8 +103,8 @@
     - pull_required
     - download_force_cache

-- name: download_container | Load image into docker # noqa 305
-  shell: "{{ image_load_command }}"
+- name: download_container | Load image into docker
+  shell: "{{ image_load_command }}" # noqa 305 image_load_command uses pipes, therefore requires shell
   register: container_load_status
   failed_when: container_load_status is failed
   when:
@@ -32,8 +32,8 @@
     - localhost
     - asserts

-- name: prep_download | On localhost, check if user has access to docker without using sudo # noqa 305
-  shell: "{{ image_info_command_on_localhost }}"
+- name: prep_download | On localhost, check if user has access to docker without using sudo
+  shell: "{{ image_info_command_on_localhost }}" # noqa 305 image_info_command_on_localhost contains pipe, therefore requires shell
   delegate_to: localhost
   connection: local
   run_once: true
@@ -68,8 +68,8 @@
     - localhost
     - asserts

-- name: prep_download | Register docker images info # noqa 305
-  shell: "{{ image_info_command }}"
+- name: prep_download | Register docker images info
+  shell: "{{ image_info_command }}" # noqa 305 image_info_command contains pipe therefore requires shell
   no_log: true
   register: docker_images
   failed_when: false
@@ -1,6 +1,6 @@
 ---
-- name: "calico upgrade complete" # noqa 305
-  shell: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
+- name: "calico upgrade complete"
+  command: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
   when:
     - inventory_hostname == groups['kube-master'][0]
     - calico_upgrade_enabled|default(True)
@@ -131,8 +131,8 @@
     group: root
     mode: "0644"

-- name: Restart all kube-proxy pods to ensure that they load the new configmap # noqa 305
-  shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
+- name: Restart all kube-proxy pods to ensure that they load the new configmap
+  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
   run_once: true
   delegate_to: "{{ groups['kube-master']|first }}"
   delegate_facts: false
@@ -157,8 +157,8 @@

 # FIXME(jjo): need to post-remove kube-proxy until https://github.com/kubernetes/kubeadm/issues/776
 # is fixed
-- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services # noqa 305
-  shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
+- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services
+  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
   run_once: true
   delegate_to: "{{ groups['kube-master']|first }}"
   when:
@@ -45,8 +45,8 @@
   tags:
     - kube-proxy

-- name: Verify if br_netfilter module exists # noqa 305
-  shell: "modinfo br_netfilter"
+- name: Verify if br_netfilter module exists
+  command: "modinfo br_netfilter"
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH's conservative path management
   register: modinfo_br_netfilter
@@ -24,8 +24,8 @@
   set_fact:
     is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}"

-- name: check resolvconf # noqa 305
-  shell: which resolvconf
+- name: check resolvconf
+  command: which resolvconf
   register: resolvconf
   failed_when: false
   changed_when: false
@@ -1,6 +1,6 @@
 ---
-- name: Update package management cache (zypper) - SUSE # noqa 305
-  shell: zypper -n --gpg-auto-import-keys ref
+- name: Update package management cache (zypper) - SUSE
+  command: zypper -n --gpg-auto-import-keys ref
   register: make_cache_output
   until: make_cache_output is succeeded
   retries: 4
@@ -34,8 +34,8 @@
   delegate_to: "{{ groups['kube-master'][0] }}"
   when: gen_tokens|default(false)

-- name: Gen_tokens | Get list of tokens from first master # noqa 305
-  shell: "(find {{ kube_token_dir }} -maxdepth 1 -type f)"
+- name: Gen_tokens | Get list of tokens from first master
+  command: "find {{ kube_token_dir }} -maxdepth 1 -type f"
   register: tokens_list
   check_mode: no
   delegate_to: "{{ groups['kube-master'][0] }}"
@@ -1,6 +1,6 @@
 ---
-- name: Kube-OVN | Label ovn-db node # noqa 305
-  shell: >-
+- name: Kube-OVN | Label ovn-db node
+  command: >-
     {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} kube-ovn/role=master
   when:
     - inventory_hostname == groups['kube-master'][0]
@@ -1,6 +1,6 @@
 ---
-- name: Get etcd endpoint health # noqa 305
-  shell: "{{ bin_dir }}/etcdctl endpoint health"
+- name: Get etcd endpoint health
+  command: "{{ bin_dir }}/etcdctl endpoint health"
   register: etcd_endpoint_health
   ignore_errors: true
   changed_when: false
@@ -57,8 +57,8 @@
     - groups['broken_etcd']
     - "item.rc != 0 and not 'No such file or directory' in item.stderr"

-- name: Get etcd cluster members # noqa 305
-  shell: "{{ bin_dir }}/etcdctl member list"
+- name: Get etcd cluster members
+  command: "{{ bin_dir }}/etcdctl member list"
   register: member_list
   changed_when: false
   check_mode: no
@@ -73,8 +73,8 @@
     - not healthy
     - has_quorum

-- name: Remove broken cluster members # noqa 305
-  shell: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}"
+- name: Remove broken cluster members
+  command: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}"
   environment:
     ETCDCTL_API: 3
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
@@ -1,6 +1,6 @@
 ---
-- name: Save etcd snapshot # noqa 305
-  shell: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db"
+- name: Save etcd snapshot
+  command: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db"
   environment:
     - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     - ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
@@ -1,6 +1,6 @@
 ---
-- name: Wait for apiserver # noqa 305
-  shell: "{{ bin_dir }}/kubectl get nodes"
+- name: Wait for apiserver
+  command: "{{ bin_dir }}/kubectl get nodes"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   register: apiserver_is_ready
@@ -10,8 +10,8 @@
   changed_when: false
   when: groups['broken_kube-master']

-- name: Delete broken kube-master nodes from cluster # noqa 305
-  shell: "{{ bin_dir }}/kubectl delete node {{ item }}"
+- name: Delete broken kube-master nodes from cluster
+  command: "{{ bin_dir }}/kubectl delete node {{ item }}"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   with_items: "{{ groups['broken_kube-master'] }}"
@@ -34,8 +34,8 @@
   delegate_to: "{{ groups['etcd']|first }}"
   when: inventory_hostname in groups['etcd']

-- name: Remove etcd member from cluster # noqa 305
-  shell: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
+- name: Remove etcd member from cluster
+  command: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
   register: etcd_member_in_cluster
   changed_when: false
   check_mode: no
@@ -161,8 +161,8 @@
   tags:
     - iptables

-- name: Clear IPVS virtual server table # noqa 305
-  shell: "ipvsadm -C"
+- name: Clear IPVS virtual server table
+  command: "ipvsadm -C"
   when:
     - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s-cluster']

@@ -16,8 +16,8 @@
     state: absent
     name: "{{ test_name }}"

-- name: Wait for namespace {{ test_name }} to be fully deleted # noqa 305
-  shell: kubectl get ns {{ test_name }}
+- name: Wait for namespace {{ test_name }} to be fully deleted
+  command: kubectl get ns {{ test_name }}
   register: delete_namespace
   failed_when:
     - delete_namespace.rc == 0
@@ -89,8 +89,8 @@
     - item in pods_running
   with_items: "{{ pod_ips }}"

-- name: Ping between pods is working # noqa 305
-  shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
+- name: Ping between pods is working
+  command: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
   when:
     - not item[0] in pods_hostnet
     - not item[1] in pods_hostnet
@@ -98,8 +98,8 @@
     - "{{ pod_names }}"
     - "{{ pod_ips }}"

-- name: Ping between hostnet pods is working # noqa 305
-  shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
+- name: Ping between hostnet pods is working
+  command: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
   when:
     - item[0] in pods_hostnet
     - item[1] in pods_hostnet
@@ -14,8 +14,8 @@
     netchecker_port: 31081

   tasks:
-    - name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282) # noqa 305
-      shell: "ethtool --offload flannel.1 rx off tx off"
+    - name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282)
+      command: "ethtool --offload flannel.1 rx off tx off"
       ignore_errors: true
       when:
         - kube_network_plugin|default('calico') == 'flannel'
@@ -214,8 +214,8 @@
     - inventory_hostname == groups['kube-master'][0]
     - kube_network_plugin_multus|default(false)|bool

-- name: Check secondary macvlan interface # noqa 305
-  shell: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
+- name: Check secondary macvlan interface
+  command: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
   register: output
   until: output.rc == 0
   retries: 90
@@ -1,6 +1,6 @@
 ---
-- name: Generate dump folder # noqa 305
-  shell: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump"
+- name: Generate dump folder
+  command: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump"
   no_log: true
   when: inventory_hostname in groups['kube-master']
