From 214e08f8c91f6183ec94567ee0145879743ad9d9 Mon Sep 17 00:00:00 2001
From: Maxime Guyot <Miouge1@users.noreply.github.com>
Date: Tue, 28 Jul 2020 10:39:08 +0200
Subject: [PATCH] Fix ansible-lint E305 (#6459)

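ansible-lint rule 305 ("Use shell only when shell functionality is
required") flags tasks that use the shell module where the command
module is enough. Convert those tasks to the command module, for
example (from roles/container-engine/cri-o/tasks/crictl.yml):

    # before
    - name: Get crictl completion  # noqa 305
      shell: "{{ bin_dir }}/crictl completion"

    # after
    - name: Get crictl completion
      command: "{{ bin_dir }}/crictl completion"

Tasks whose commands genuinely need shell features (e.g. the pipes in
image_info_command) keep the shell module and carry an inline
"# noqa 305" comment explaining why shell is required.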
---
 roles/container-engine/containerd/tasks/main.yml     |  5 ++---
 roles/container-engine/cri-o/tasks/crictl.yml        |  4 ++--
 roles/container-engine/docker/tasks/main.yml         |  5 ++---
 roles/download/tasks/check_pull_required.yml         |  2 +-
 roles/download/tasks/download_container.yml          |  8 ++++----
 roles/download/tasks/prep_download.yml               |  8 ++++----
 .../network_plugin/calico/tasks/main.yml             |  4 ++--
 roles/kubernetes/kubeadm/tasks/main.yml              |  8 ++++----
 roles/kubernetes/node/tasks/main.yml                 |  4 ++--
 roles/kubernetes/preinstall/tasks/0040-set_facts.yml |  4 ++--
 .../preinstall/tasks/0070-system-packages.yml        |  4 ++--
 roles/kubernetes/tokens/tasks/gen_tokens.yml         |  4 ++--
 roles/network_plugin/kube-ovn/tasks/main.yml         |  4 ++--
 roles/recover_control_plane/etcd/tasks/main.yml      | 12 ++++++------
 .../etcd/tasks/recover_lost_quorum.yml               |  4 ++--
 roles/recover_control_plane/master/tasks/main.yml    |  8 ++++----
 roles/remove-node/remove-etcd-node/tasks/main.yml    |  4 ++--
 roles/reset/tasks/main.yml                           |  4 ++--
 .../roles/packet-ci/tasks/delete-vms.yml             |  4 ++--
 tests/testcases/030_check-network.yml                |  8 ++++----
 tests/testcases/040_check-network-adv.yml            |  8 ++++----
 tests/testcases/roles/cluster-dump/tasks/main.yml    |  4 ++--
 22 files changed, 59 insertions(+), 61 deletions(-)

diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml
index 8859b1691..41b8df674 100644
--- a/roles/container-engine/containerd/tasks/main.yml
+++ b/roles/container-engine/containerd/tasks/main.yml
@@ -34,9 +34,8 @@
   tags:
     - facts
 
-- name: disable unified_cgroup_hierarchy in Fedora 31+  # noqa 305
-  shell:
-    cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
+- name: disable unified_cgroup_hierarchy in Fedora 31+
+  command: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
   when:
     - ansible_distribution == "Fedora"
     - (ansible_distribution_major_version | int) >= 31
diff --git a/roles/container-engine/cri-o/tasks/crictl.yml b/roles/container-engine/cri-o/tasks/crictl.yml
index 1639c4971..574957457 100644
--- a/roles/container-engine/cri-o/tasks/crictl.yml
+++ b/roles/container-engine/cri-o/tasks/crictl.yml
@@ -21,8 +21,8 @@
     group: no
   delegate_to: "{{ inventory_hostname }}"
 
-- name: Get crictl completion  # noqa 305
-  shell: "{{ bin_dir }}/crictl completion"
+- name: Get crictl completion
+  command: "{{ bin_dir }}/crictl completion"
   changed_when: False
   register: cri_completion
 
diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml
index c444f897c..b0d5adbf6 100644
--- a/roles/container-engine/docker/tasks/main.yml
+++ b/roles/container-engine/docker/tasks/main.yml
@@ -47,9 +47,8 @@
   tags:
     - facts
 
-- name: disable unified_cgroup_hierarchy in Fedora 31+  # noqa 305
-  shell:
-    cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
+- name: disable unified_cgroup_hierarchy in Fedora 31+
+  command: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
   when:
     - ansible_distribution == "Fedora"
     - (ansible_distribution_major_version | int) >= 31
diff --git a/roles/download/tasks/check_pull_required.yml b/roles/download/tasks/check_pull_required.yml
index 14dc114fa..cc31a1423 100644
--- a/roles/download/tasks/check_pull_required.yml
+++ b/roles/download/tasks/check_pull_required.yml
@@ -4,7 +4,7 @@
 # the template, just replace all instances of {{ `{{` }} with {{ and {{ '}}' }} with }}.
 # It will output something like the following:
 # nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
-- name: check_pull_required |  Generate a list of information about the images on a node  # noqa 305
+- name: check_pull_required | Generate a list of information about the images on a node  # noqa 305 image_info_command contains a pipe, therefore requires shell
   shell: "{{ image_info_command }}"
   no_log: true
   register: docker_images
diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml
index 28b3867f2..7f015999f 100644
--- a/roles/download/tasks/download_container.yml
+++ b/roles/download/tasks/download_container.yml
@@ -63,8 +63,8 @@
         - pull_required or download_run_once
         - not image_is_cached
 
-    - name: download_container | Save and compress image  # noqa 305
-      shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}"
+    - name: download_container | Save and compress image
+      shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}"  # noqa 305 image_save_command_on_localhost contains a pipe, therefore requires shell
       delegate_to: "{{ download_delegate }}"
       delegate_facts: no
       register: container_save_status
@@ -103,8 +103,8 @@
         - pull_required
         - download_force_cache
 
-    - name: download_container | Load image into docker  # noqa 305
-      shell: "{{ image_load_command }}"
+    - name: download_container | Load image into docker
+      shell: "{{ image_load_command }}"  # noqa 305 image_load_command uses pipes, therefore requires shell
       register: container_load_status
       failed_when: container_load_status is failed
       when:
diff --git a/roles/download/tasks/prep_download.yml b/roles/download/tasks/prep_download.yml
index 2ac1253f1..88eb414ab 100644
--- a/roles/download/tasks/prep_download.yml
+++ b/roles/download/tasks/prep_download.yml
@@ -32,8 +32,8 @@
     - localhost
     - asserts
 
-- name: prep_download | On localhost, check if user has access to docker without using sudo  # noqa 305
-  shell: "{{ image_info_command_on_localhost }}"
+- name: prep_download | On localhost, check if user has access to docker without using sudo
+  shell: "{{ image_info_command_on_localhost }}"  # noqa 305 image_info_command_on_localhost contains pipe, therefore requires shell
   delegate_to: localhost
   connection: local
   run_once: true
@@ -68,8 +68,8 @@
     - localhost
     - asserts
 
-- name: prep_download | Register docker images info  # noqa 305
-  shell: "{{ image_info_command }}"
+- name: prep_download | Register docker images info
+  shell: "{{ image_info_command }}"  # noqa 305 image_info_command contains pipe therefore requires shell
   no_log: true
   register: docker_images
   failed_when: false
diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
index 37f086849..af902c11c 100644
--- a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
-- name: "calico upgrade complete"  # noqa 305
-  shell: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
+- name: "calico upgrade complete"
+  command: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
   when:
     - inventory_hostname == groups['kube-master'][0]
     - calico_upgrade_enabled|default(True)
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 91bc35eb2..75435095a 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -131,8 +131,8 @@
     group: root
     mode: "0644"
 
-- name: Restart all kube-proxy pods to ensure that they load the new configmap  # noqa 305
-  shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
+- name: Restart all kube-proxy pods to ensure that they load the new configmap
+  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
   run_once: true
   delegate_to: "{{ groups['kube-master']|first }}"
   delegate_facts: false
@@ -157,8 +157,8 @@
 
 # FIXME(jjo): need to post-remove kube-proxy until https://github.com/kubernetes/kubeadm/issues/776
 # is fixed
-- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services  # noqa 305
-  shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
+- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services
+  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
   run_once: true
   delegate_to: "{{ groups['kube-master']|first }}"
   when:
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 473aaf7eb..b2e78e4c6 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -45,8 +45,8 @@
   tags:
     - kube-proxy
 
-- name: Verify if br_netfilter module exists  # noqa 305
-  shell: "modinfo br_netfilter"
+- name: Verify if br_netfilter module exists
+  command: "modinfo br_netfilter"
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH's conservative path management
   register: modinfo_br_netfilter
diff --git a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
index a488f2fe0..784233c20 100644
--- a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
@@ -24,8 +24,8 @@
   set_fact:
     is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}"
 
-- name: check resolvconf  # noqa 305
-  shell: which resolvconf
+- name: check resolvconf
+  command: which resolvconf
   register: resolvconf
   failed_when: false
   changed_when: false
diff --git a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
index 2c3546e22..d447c70e4 100644
--- a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
+++ b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
@@ -1,6 +1,6 @@
 ---
-- name: Update package management cache (zypper) - SUSE  # noqa 305
-  shell: zypper -n --gpg-auto-import-keys ref
+- name: Update package management cache (zypper) - SUSE
+  command: zypper -n --gpg-auto-import-keys ref
   register: make_cache_output
   until: make_cache_output is succeeded
   retries: 4
diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml
index c6f323b23..ff0983bb3 100644
--- a/roles/kubernetes/tokens/tasks/gen_tokens.yml
+++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml
@@ -34,8 +34,8 @@
   delegate_to: "{{ groups['kube-master'][0] }}"
   when: gen_tokens|default(false)
 
-- name: Gen_tokens | Get list of tokens from first master  # noqa 305
-  shell: "(find {{ kube_token_dir }} -maxdepth 1 -type f)"
+- name: Gen_tokens | Get list of tokens from first master
+  command: "find {{ kube_token_dir }} -maxdepth 1 -type f"
   register: tokens_list
   check_mode: no
   delegate_to: "{{ groups['kube-master'][0] }}"
diff --git a/roles/network_plugin/kube-ovn/tasks/main.yml b/roles/network_plugin/kube-ovn/tasks/main.yml
index b254dd997..c416f120a 100644
--- a/roles/network_plugin/kube-ovn/tasks/main.yml
+++ b/roles/network_plugin/kube-ovn/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
-- name: Kube-OVN | Label ovn-db node  # noqa 305
-  shell: >-
+- name: Kube-OVN | Label ovn-db node
+  command: >-
     {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} kube-ovn/role=master
   when:
     - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/recover_control_plane/etcd/tasks/main.yml b/roles/recover_control_plane/etcd/tasks/main.yml
index 55874d543..ac3292283 100644
--- a/roles/recover_control_plane/etcd/tasks/main.yml
+++ b/roles/recover_control_plane/etcd/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
-- name: Get etcd endpoint health  # noqa 305
-  shell: "{{ bin_dir }}/etcdctl endpoint health"
+- name: Get etcd endpoint health
+  command: "{{ bin_dir }}/etcdctl endpoint health"
   register: etcd_endpoint_health
   ignore_errors: true
   changed_when: false
@@ -57,8 +57,8 @@
     - groups['broken_etcd']
     - "item.rc != 0 and not 'No such file or directory' in item.stderr"
 
-- name: Get etcd cluster members  # noqa 305
-  shell: "{{ bin_dir }}/etcdctl member list"
+- name: Get etcd cluster members
+  command: "{{ bin_dir }}/etcdctl member list"
   register: member_list
   changed_when: false
   check_mode: no
@@ -73,8 +73,8 @@
     - not healthy
     - has_quorum
 
-- name: Remove broken cluster members  # noqa 305
-  shell: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}"
+- name: Remove broken cluster members
+  command: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}"
   environment:
     ETCDCTL_API: 3
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
diff --git a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
index ff2c726fd..bef89f192 100644
--- a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
+++ b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
@@ -1,6 +1,6 @@
 ---
-- name: Save etcd snapshot  # noqa 305
-  shell: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db"
+- name: Save etcd snapshot
+  command: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db"
   environment:
     - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     - ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
diff --git a/roles/recover_control_plane/master/tasks/main.yml b/roles/recover_control_plane/master/tasks/main.yml
index 9cc7c33d6..5f4b6a922 100644
--- a/roles/recover_control_plane/master/tasks/main.yml
+++ b/roles/recover_control_plane/master/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
-- name: Wait for apiserver  # noqa 305
-  shell: "{{ bin_dir }}/kubectl get nodes"
+- name: Wait for apiserver
+  command: "{{ bin_dir }}/kubectl get nodes"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   register: apiserver_is_ready
@@ -10,8 +10,8 @@
   changed_when: false
   when: groups['broken_kube-master']
 
-- name: Delete broken kube-master nodes from cluster  # noqa 305
-  shell: "{{ bin_dir }}/kubectl delete node {{ item }}"
+- name: Delete broken kube-master nodes from cluster
+  command: "{{ bin_dir }}/kubectl delete node {{ item }}"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   with_items: "{{ groups['broken_kube-master'] }}"
diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml b/roles/remove-node/remove-etcd-node/tasks/main.yml
index 21a026606..0fc0f7fb4 100644
--- a/roles/remove-node/remove-etcd-node/tasks/main.yml
+++ b/roles/remove-node/remove-etcd-node/tasks/main.yml
@@ -34,8 +34,8 @@
   delegate_to: "{{ groups['etcd']|first }}"
   when: inventory_hostname in groups['etcd']
 
-- name: Remove etcd member from cluster  # noqa 305
-  shell: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
+- name: Remove etcd member from cluster
+  command: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
   register: etcd_member_in_cluster
   changed_when: false
   check_mode: no
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 4a9b13df9..bbc76eebe 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -161,8 +161,8 @@
   tags:
     - iptables
 
-- name: Clear IPVS virtual server table  # noqa 305
-  shell: "ipvsadm -C"
+- name: Clear IPVS virtual server table
+  command: "ipvsadm -C"
   when:
     - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s-cluster']
 
diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
index a37d4ed14..5cde2e7b0 100644
--- a/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
+++ b/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
@@ -16,8 +16,8 @@
     state: absent
     name: "{{ test_name }}"
 
-- name: Wait for namespace {{ test_name }} to be fully deleted  # noqa 305
-  shell: kubectl get ns {{ test_name }}
+- name: Wait for namespace {{ test_name }} to be fully deleted
+  command: kubectl get ns {{ test_name }}
   register: delete_namespace
   failed_when:
     - delete_namespace.rc == 0
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index 8887e38fe..4fbe01d40 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -89,8 +89,8 @@
     - item in pods_running
     with_items: "{{ pod_ips }}"
 
-  - name: Ping between pods is working  # noqa 305
-    shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
+  - name: Ping between pods is working
+    command: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
     when:
     - not item[0] in pods_hostnet
     - not item[1] in pods_hostnet
@@ -98,8 +98,8 @@
     - "{{ pod_names }}"
     - "{{ pod_ips }}"
 
-  - name: Ping between hostnet pods is working  # noqa 305
-    shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
+  - name: Ping between hostnet pods is working
+    command: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
     when:
     - item[0] in pods_hostnet
     - item[1] in pods_hostnet
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index 541235255..9cc38cfdb 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -14,8 +14,8 @@
     netchecker_port: 31081
 
   tasks:
-    - name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282)  # noqa 305
-      shell: "ethtool --offload flannel.1 rx off tx off"
+    - name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282)
+      command: "ethtool --offload flannel.1 rx off tx off"
       ignore_errors: true
       when:
         - kube_network_plugin|default('calico') == 'flannel'
@@ -214,8 +214,8 @@
         - inventory_hostname == groups['kube-master'][0]
         - kube_network_plugin_multus|default(false)|bool
 
-    - name: Check secondary macvlan interface  # noqa 305
-      shell: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
+    - name: Check secondary macvlan interface
+      command: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
       register: output
       until: output.rc == 0
       retries: 90
diff --git a/tests/testcases/roles/cluster-dump/tasks/main.yml b/tests/testcases/roles/cluster-dump/tasks/main.yml
index bae50b87d..589a712e0 100644
--- a/tests/testcases/roles/cluster-dump/tasks/main.yml
+++ b/tests/testcases/roles/cluster-dump/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
-- name: Generate dump folder  # noqa 305
-  shell: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump"
+- name: Generate dump folder
+  command: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump"
   no_log: true
   when: inventory_hostname in groups['kube-master']
 
-- 
GitLab