diff --git a/.ansible-lint b/.ansible-lint
index ececfc57359ae28ddb90b3e7101abe7d5aea41ec..e1909e9666e553060a7747cc82eaafdfedb22add 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -2,15 +2,8 @@
 parseable: true
 skip_list:
   # see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules
-  # The following rules throw errors.
-  # These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose.
-  - '301'
-  - '302'
-  - '303'
-  - '305'
-  - '306'
-  - '404'
-  - '503'
+
+  # DO NOT add any other rules to this skip_list; instead, use a local `# noqa` with a comment explaining WHY it is necessary.
 
   # These rules are intentionally skipped:
   #
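The rule IDs referenced above are the numeric ones from the pre-5.0 ansible-lint series this config targets. The policy introduced here is to silence a rule only on the task that needs it, together with a comment saying why. A minimal sketch of that pattern, using an illustrative task that is not part of this repository:

    - name: Example | Delete a finished job  # noqa 301
      # noqa 301 ("Commands should not change things if nothing needs doing"):
      # kubectl delete always reports "changed", but --ignore-not-found makes
      # this safe to re-run, so silencing the rule locally is acceptable here.
      command: "{{ bin_dir }}/kubectl delete job example-job --ignore-not-found=true"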
diff --git a/contrib/azurerm/roles/generate-inventory/tasks/main.yml b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
index 20a06e10c4037645982e7d65a68aa66e32e24f51..ccc5e219a7a80d8adfe2642a8543fe1a0e2d3581 100644
--- a/contrib/azurerm/roles/generate-inventory/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 
-- name: Query Azure VMs
+- name: Query Azure VMs  # noqa 301
   command: azure vm list-ip-address --json {{ azure_resource_group }}
   register: vm_list_cmd
 
diff --git a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
index e53912cfcd0904790fb2bef0f0f5e6595e0309a4..6ba7d5a873375626e8691bb68bd9f4796a5e97b4 100644
--- a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
@@ -1,14 +1,14 @@
 ---
 
-- name: Query Azure VMs IPs
+- name: Query Azure VMs IPs  # noqa 301
   command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
   register: vm_ip_list_cmd
 
-- name: Query Azure VMs Roles
+- name: Query Azure VMs Roles  # noqa 301
   command: az vm list -o json --resource-group {{ azure_resource_group }}
   register: vm_list_cmd
 
-- name: Query Azure Load Balancer Public IP
+- name: Query Azure Load Balancer Public IP  # noqa 301
   command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
   register: lb_pubip_cmd
 
diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml
index 40ca53cd6d9607113ee59f5cd8e1fc966d63d73d..5b63a6b37d071dc63ac4235e3aafb269571fe8d8 100644
--- a/contrib/dind/roles/dind-host/tasks/main.yaml
+++ b/contrib/dind/roles/dind-host/tasks/main.yaml
@@ -69,7 +69,7 @@
 
 # Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
 # handle manually
-- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
+- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for MAC address seeding (notably weave)  # noqa 301
   raw: |
     echo {{ item | hash('sha1') }} > /etc/machine-id.new
     mv -b /etc/machine-id.new /etc/machine-id
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml
index 8f80914f872ab02b00bbfec617ccf3f91b286759..2865b1004104f003a227acef9ec23136260b9810 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml
@@ -7,7 +7,7 @@
   register: glusterfs_ppa_added
   when: glusterfs_ppa_use
 
-- name: Ensure GlusterFS client will reinstall if the PPA was just added.
+- name: Ensure GlusterFS client will reinstall if the PPA was just added.  # noqa 503
   apt:
     name: "{{ item }}"
     state: absent
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml
index 3b586c539a0ce09ba64b2b81f95e8adf6440e962..855fe36bf5fbdb9d1a031afa6f3de4a10d0baa4d 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml
@@ -7,7 +7,7 @@
   register: glusterfs_ppa_added
   when: glusterfs_ppa_use
 
-- name: Ensure GlusterFS will reinstall if the PPA was just added.
+- name: Ensure GlusterFS will reinstall if the PPA was just added.  # noqa 503
   apt:
     name: "{{ item }}"
     state: absent
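Rule 503 ("Tasks that run when changed should likely be handlers") is what the two GlusterFS reinstall tasks above trip, since they key off glusterfs_ppa_added being changed. The noqa keeps the existing flow; the shape the rule prefers is a notify/handler pair, roughly like this sketch (the variable and handler names here are hypothetical):

    - name: Add GlusterFS PPA
      apt_repository:
        repo: "ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}"  # hypothetical variable
      notify: Remove GlusterFS packages for reinstall

    # handlers/main.yml
    - name: Remove GlusterFS packages for reinstall
      apt:
        name: glusterfs-server
        state: absent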
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml
index 0ffd6f469f2e5fbbcbef40fe45e0aa8dc7577144..e6b16e54a10537751e2a44739a8b1a6e9480399b 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml
@@ -6,7 +6,7 @@
 - name: "Delete bootstrap Heketi."
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
   when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
-- name: "Ensure there is nothing left over."
+- name: "Ensure there is nothing left over."  # noqa 301
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
index 7d2c5981e7ef4a3a49c940740e7392910615836a..07e86237cec15ba5d3c7d405a4ec0d47f28b12b4 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
@@ -13,7 +13,7 @@
 - name: "Copy topology configuration into container."
   changed_when: false
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology."
+- name: "Load heketi topology."  # noqa 503
   when: "render.changed"
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
   register: "load_heketi"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml
index 14ab97793991d5c7f2d39d693b89dde66a9636ea..dc93d782877d4f58d97abf7c3d06e660f50624af 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml
@@ -18,7 +18,7 @@
 - name: "Provision database volume."
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
   when: "heketi_database_volume_exists is undefined"
-- name: "Copy configuration from pod."
+- name: "Copy configuration from pod."  # noqa 301
   become: true
   command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
 - name: "Get heketi volume ids."
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
index dd1e272beb01b36d75d96c6be2d7abe169884f03..4430a55926a97982c750bd12b13f4bb4edb20aa0 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
@@ -10,10 +10,10 @@
   template:
     src: "topology.json.j2"
     dest: "{{ kube_config_dir }}/topology.json"
-- name: "Copy topology configuration into container."
+- name: "Copy topology configuration into container."  # noqa 503
   when: "rendering.changed"
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology."
+- name: "Load heketi topology."  # noqa 503
   when: "rendering.changed"
   command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
 - name: "Get heketi topology."
diff --git a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
index 9ace96e6274e97311f1dbb48a5999b5f023c8e1d..7ddbf65c81f86fdcad3d3a97ac6f32a71995be8f 100644
--- a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
+++ b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
@@ -22,7 +22,7 @@
   ignore_errors: true
   changed_when: false
 
-- name: "Remove volume groups."
+- name: "Remove volume groups."  # noqa 301
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH / CentOS conservative path management
   become: true
@@ -30,7 +30,7 @@
   with_items: "{{ volume_groups.stdout_lines }}"
   loop_control: { loop_var: "volume_group" }
 
-- name: "Remove physical volume from cluster disks."
+- name: "Remove physical volume from cluster disks."  # noqa 301
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH / CentOS conservative path management
   become: true
diff --git a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
index ddc56b256ad65978cda196c3605d6a036fac5cc0..18c11a7315e39fb2ec81d9f229765618c580957c 100644
--- a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
+++ b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
@@ -1,43 +1,43 @@
 ---
-- name: "Remove storage class."
+- name: "Remove storage class."  # noqa 301
   command: "{{ bin_dir }}/kubectl delete storageclass gluster"
   ignore_errors: true
-- name: "Tear down heketi."
+- name: "Tear down heketi."  # noqa 301
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
   ignore_errors: true
-- name: "Tear down heketi."
+- name: "Tear down heketi."  # noqa 301
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
   ignore_errors: true
 - name: "Tear down bootstrap."
   include_tasks: "../provision/tasks/bootstrap/tear-down.yml"
-- name: "Ensure there is nothing left over."
+- name: "Ensure there is nothing left over."  # noqa 301
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: "Ensure there is nothing left over."
+- name: "Ensure there is nothing left over."  # noqa 301
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: "Tear down glusterfs."
+- name: "Tear down glusterfs."  # noqa 301
   command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
   ignore_errors: true
-- name: "Remove heketi storage service."
+- name: "Remove heketi storage service."  # noqa 301
   command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
   ignore_errors: true
-- name: "Remove heketi gluster role binding"
+- name: "Remove heketi gluster role binding"  # noqa 301
   command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
   ignore_errors: true
-- name: "Remove heketi config secret"
+- name: "Remove heketi config secret"  # noqa 301
   command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
   ignore_errors: true
-- name: "Remove heketi db backup"
+- name: "Remove heketi db backup"  # noqa 301
   command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
   ignore_errors: true
-- name: "Remove heketi service account"
+- name: "Remove heketi service account"  # noqa 301
   command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
   ignore_errors: true
 - name: "Get secrets"
diff --git a/extra_playbooks/migrate_openstack_provider.yml b/extra_playbooks/migrate_openstack_provider.yml
index 114e4cf0c7d532323b4c71872903808c18792a3b..0e1584470d77d1c3b0f45dbece904e3d8fe0f16a 100644
--- a/extra_playbooks/migrate_openstack_provider.yml
+++ b/extra_playbooks/migrate_openstack_provider.yml
@@ -16,13 +16,13 @@
         src: get_cinder_pvs.sh
         dest: /tmp
         mode: u+rwx
-    - name: Get PVs provisioned by in-tree cloud provider
+    - name: Get PVs provisioned by in-tree cloud provider  # noqa 301
       command: /tmp/get_cinder_pvs.sh
       register: pvs
     - name: Remove get_cinder_pvs.sh
       file:
         path: /tmp/get_cinder_pvs.sh
         state: absent
-    - name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation
+    - name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation  # noqa 301
       command: "{{ bin_dir }}/kubectl annotate --overwrite pv {{ item }} pv.kubernetes.io/provisioned-by=cinder.csi.openstack.org"
       loop: "{{ pvs.stdout_lines | list }}"
diff --git a/roles/container-engine/containerd/tasks/crictl.yml b/roles/container-engine/containerd/tasks/crictl.yml
index eaa94efa3f726c945fd5c3cb19e8ea7ab0b3edf0..9310cb9458693c7b5620724b5d0296406a9494bd 100644
--- a/roles/container-engine/containerd/tasks/crictl.yml
+++ b/roles/container-engine/containerd/tasks/crictl.yml
@@ -4,7 +4,7 @@
   vars:
     download: "{{ download_defaults | combine(downloads.crictl) }}"
 
-- name: Install crictl config
+- name: Install crictl config  # noqa 404
   template:
     src: ../templates/crictl.yaml.j2
     dest: /etc/crictl.yaml
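Rule 404 ("Doesn't need a relative path in role") objects to the ../templates/ prefix on src, so the noqa documents that the current layout is kept on purpose. If the template lived in this role's own templates/ directory, the rule-clean form would simply be:

    - name: Install crictl config
      template:
        src: crictl.yaml.j2  # resolved from the role's own templates/ directory
        dest: /etc/crictl.yaml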
diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml
index ebed7e11f62e7c88c8b47446c48b8677b68dc56d..8859b1691903f62376ea570d27a17aa729609c0f 100644
--- a/roles/container-engine/containerd/tasks/main.yml
+++ b/roles/container-engine/containerd/tasks/main.yml
@@ -34,7 +34,7 @@
   tags:
     - facts
 
-- name: disable unified_cgroup_hierarchy in Fedora 31+
+- name: disable unified_cgroup_hierarchy in Fedora 31+  # noqa 305
   shell:
     cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
   when:
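Rule 305 ("Use shell only when shell functionality is required"): the grubby invocation uses no pipes, redirects or globbing, so the command module would likely do the same job and keep the rule active. A sketch of that alternative:

    - name: disable unified_cgroup_hierarchy in Fedora 31+
      command: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"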
diff --git a/roles/container-engine/cri-o/tasks/crictl.yml b/roles/container-engine/cri-o/tasks/crictl.yml
index e9698053349c47ad5554ce059fc5717771518b29..146fef66a769e2c011053655ef7330f019df218b 100644
--- a/roles/container-engine/cri-o/tasks/crictl.yml
+++ b/roles/container-engine/cri-o/tasks/crictl.yml
@@ -4,7 +4,7 @@
   vars:
     download: "{{ download_defaults | combine(downloads.crictl) }}"
 
-- name: Install crictl config
+- name: Install crictl config  # noqa 404
   template:
     src: ../templates/crictl.yaml.j2
     dest: /etc/crictl.yaml
@@ -21,7 +21,7 @@
     group: no
   delegate_to: "{{ inventory_hostname }}"
 
-- name: Get crictl completion
+- name: Get crictl completion  # noqa 305
   shell: "{{ bin_dir }}/crictl completion"
   changed_when: False
   register: cri_completion
diff --git a/roles/container-engine/cri-o/tasks/main.yaml b/roles/container-engine/cri-o/tasks/main.yaml
index af0ecb92eec0b20b720110e4f0de8a92db0290a0..6b6f114d2510a955b3a160fe9dd752902d0817dc 100644
--- a/roles/container-engine/cri-o/tasks/main.yaml
+++ b/roles/container-engine/cri-o/tasks/main.yaml
@@ -59,7 +59,7 @@
     - ansible_distribution == "CentOS"
     - ansible_distribution_major_version == "8"
 
-- name: Ensure latest version of libseccom installed
+- name: Ensure latest version of libseccomp installed  # noqa 303
   command: "yum update -y libseccomp"
   when:
     - ansible_distribution == "CentOS"
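Rule 303 ("Using command rather than module") points at the plain yum call above. The module-based shape would be roughly the sketch below, though that in turn brushes against rule 403 ("Package installs should not use latest"), which may be why the command plus a local noqa was the simpler trade-off:

    - name: Ensure latest version of libseccomp installed
      yum:
        name: libseccomp
        state: latest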
diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml
index 9369186bb7a254c0b2495c62c8ad6e22fac8903b..c444f897c0341d1e67e465b7d090b6daa65f4024 100644
--- a/roles/container-engine/docker/tasks/main.yml
+++ b/roles/container-engine/docker/tasks/main.yml
@@ -47,7 +47,7 @@
   tags:
     - facts
 
-- name: disable unified_cgroup_hierarchy in Fedora 31+
+- name: disable unified_cgroup_hierarchy in Fedora 31+  # noqa 305
   shell:
     cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
   when:
diff --git a/roles/container-engine/docker/tasks/set_facts_dns.yml b/roles/container-engine/docker/tasks/set_facts_dns.yml
index 23464dabb0470778f3b4ec157f8e303b4de1dbcf..b884c7cf060b3a9cf3aa60eae58e88bfe5afd68f 100644
--- a/roles/container-engine/docker/tasks/set_facts_dns.yml
+++ b/roles/container-engine/docker/tasks/set_facts_dns.yml
@@ -28,13 +28,13 @@
   set_fact:
     docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains|default([]) }}"
 
-- name: check system nameservers
+- name: check system nameservers  # noqa 306
   shell: grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
   changed_when: False
   register: system_nameservers
   check_mode: no
 
-- name: check system search domains
+- name: check system search domains  # noqa 306
   shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
   changed_when: False
   register: system_search_domains
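Rule 306 ("Shells that use pipes should set the pipefail option") fires on both grep | sed pipelines above. Leaving pipefail off is presumably intentional, since a resolv.conf without nameserver or search lines should not fail the play; the rule-clean shape would look roughly like this, at the cost of having to tolerate grep's exit code 1:

    - name: check system nameservers
      shell: set -o pipefail && grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
      args:
        executable: /bin/bash  # pipefail needs bash, not dash
      register: system_nameservers
      changed_when: False
      failed_when: system_nameservers.rc not in [0, 1]  # rc 1 just means no match
      check_mode: no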
diff --git a/roles/container-engine/docker/tasks/systemd.yml b/roles/container-engine/docker/tasks/systemd.yml
index 0a232ea9eed01b1f4ae359a10b1df5635a746bb6..108eea18854685dee08eb9ddfc55c66f69827791 100644
--- a/roles/container-engine/docker/tasks/systemd.yml
+++ b/roles/container-engine/docker/tasks/systemd.yml
@@ -11,7 +11,7 @@
   notify: restart docker
   when: http_proxy is defined or https_proxy is defined
 
-- name: get systemd version
+- name: get systemd version  # noqa 306
   # noqa 303 - systemctl is called intentionally here
   shell: systemctl --version | head -n 1 | cut -d " " -f 2
   register: systemd_version
diff --git a/roles/download/tasks/check_pull_required.yml b/roles/download/tasks/check_pull_required.yml
index 9361b87c5baa57e08b47f98590ec6e93800e4dd7..14dc114fa98747fc928311bc54cc8fe03dba9526 100644
--- a/roles/download/tasks/check_pull_required.yml
+++ b/roles/download/tasks/check_pull_required.yml
@@ -4,7 +4,7 @@
 # the template, just replace all instances  of {{ `{{` }} with {{ and {{ '}}' }} with }}.
 # It will output something like the following:
 # nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
-- name: check_pull_required |  Generate a list of information about the images on a node
+- name: check_pull_required | Generate a list of information about the images on a node  # noqa 305
   shell: "{{ image_info_command }}"
   no_log: true
   register: docker_images
diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml
index 234bf1f955708bd8b3c94e6464e4a3ec7e5a8323..28b3867f28ceee039382fa2eb1bf0c8fb4c45021 100644
--- a/roles/download/tasks/download_container.yml
+++ b/roles/download/tasks/download_container.yml
@@ -63,7 +63,7 @@
         - pull_required or download_run_once
         - not image_is_cached
 
-    - name: download_container | Save and compress image
+    - name: download_container | Save and compress image  # noqa 305
       shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}"
       delegate_to: "{{ download_delegate }}"
       delegate_facts: no
@@ -103,7 +103,7 @@
         - pull_required
         - download_force_cache
 
-    - name: download_container | Load image into docker
+    - name: download_container | Load image into docker  # noqa 305
       shell: "{{ image_load_command }}"
       register: container_load_status
       failed_when: container_load_status is failed
diff --git a/roles/download/tasks/prep_download.yml b/roles/download/tasks/prep_download.yml
index 8e1d131ca5bc99b363995a35ae0950ba57073a1a..2ac1253f167c24227756fd9802313584cafcb0ef 100644
--- a/roles/download/tasks/prep_download.yml
+++ b/roles/download/tasks/prep_download.yml
@@ -32,7 +32,7 @@
     - localhost
     - asserts
 
-- name: prep_download | On localhost, check if user has access to docker without using sudo
+- name: prep_download | On localhost, check if user has access to docker without using sudo  # noqa 305
   shell: "{{ image_info_command_on_localhost }}"
   delegate_to: localhost
   connection: local
@@ -68,7 +68,7 @@
     - localhost
     - asserts
 
-- name: prep_download | Register docker images info
+- name: prep_download | Register docker images info  # noqa 305
   shell: "{{ image_info_command }}"
   no_log: true
   register: docker_images
diff --git a/roles/download/tasks/prep_kubeadm_images.yml b/roles/download/tasks/prep_kubeadm_images.yml
index 411ef5b3ff3b2f7d31143d193c4644f17f5feb75..c97c19e0b6014583e3061b5b172ae645d6c87f49 100644
--- a/roles/download/tasks/prep_kubeadm_images.yml
+++ b/roles/download/tasks/prep_kubeadm_images.yml
@@ -30,7 +30,7 @@
     mode: "0755"
     state: file
 
-- name: prep_kubeadm_images | Generate list of required images
+- name: prep_kubeadm_images | Generate list of required images  # noqa 306
   shell: "{{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -v coredns"
   register: kubeadm_images_raw
   run_once: true
diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml
index 56d5f86c8b9c222f29a8709533edc11bf2d99ae9..39df567f64c87449d5739d31ec3225d1fd2f6107 100644
--- a/roles/etcd/tasks/configure.yml
+++ b/roles/etcd/tasks/configure.yml
@@ -1,5 +1,5 @@
 ---
-- name: Configure | Check if etcd cluster is healthy
+- name: Configure | Check if etcd cluster is healthy  # noqa 306
   shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health  2>&1 | grep -q -v 'Error: unhealthy cluster'"
   register: etcd_cluster_is_healthy
   failed_when: false
@@ -16,7 +16,7 @@
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
 
-- name: Configure | Check if etcd-events cluster is healthy
+- name: Configure | Check if etcd-events cluster is healthy  # noqa 306
   shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health  2>&1 | grep -q -v 'Error: unhealthy cluster'"
   register: etcd_events_cluster_is_healthy
   failed_when: false
@@ -73,7 +73,7 @@
   ignore_errors: "{{ etcd_events_cluster_is_healthy.rc == 0 }}"
   when: is_etcd_master and etcd_events_cluster_setup
 
-- name: Configure | Wait for etcd cluster to be healthy
+- name: Configure | Wait for etcd cluster to be healthy  # noqa 306
   shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
   register: etcd_cluster_is_healthy
   until: etcd_cluster_is_healthy.rc == 0
@@ -94,7 +94,7 @@
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
 
-- name: Configure | Wait for etcd-events cluster to be healthy
+- name: Configure | Wait for etcd-events cluster to be healthy  # noqa 306
   shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
   register: etcd_events_cluster_is_healthy
   until: etcd_events_cluster_is_healthy.rc == 0
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index 651b76719fcf952f4363f79b43056479dcac4111..5dd25547dc8d9b462997e5afc62b21f12cf3dbd6 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -139,7 +139,7 @@
         inventory_hostname in groups['k8s-cluster']) and
         sync_certs|default(false) and inventory_hostname not in groups['etcd']
 
-- name: Gen_certs | Copy certs on nodes
+- name: Gen_certs | Copy certs on nodes  # noqa 306
   shell: "base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}"
   args:
     executable: /bin/bash
diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml
index a6a197a7422f9323835380cbd917c0bb4e89c2a4..c4de329067c05ec45a779d3dfe896fb83a3583c9 100644
--- a/roles/etcd/tasks/join_etcd-events_member.yml
+++ b/roles/etcd/tasks/join_etcd-events_member.yml
@@ -1,5 +1,5 @@
 ---
-- name: Join Member | Add member to etcd-events cluster
+- name: Join Member | Add member to etcd-events cluster  # noqa 301 305
   shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}"
   register: member_add_result
   until: member_add_result.rc == 0
@@ -24,7 +24,7 @@
         {%- endif -%}
       {%- endfor -%}
 
-- name: Join Member | Ensure member is in etcd-events cluster
+- name: Join Member | Ensure member is in etcd-events cluster  # noqa 306
   shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_events_access_address }}"
   register: etcd_events_member_in_cluster
   changed_when: false
diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml
index e7ee2a348f621c94466393890406a0610980024c..24a800bef1ab3a9a6dc653f510bc9fbd4f9ce53d 100644
--- a/roles/etcd/tasks/join_etcd_member.yml
+++ b/roles/etcd/tasks/join_etcd_member.yml
@@ -1,5 +1,5 @@
 ---
-- name: Join Member | Add member to etcd cluster
+- name: Join Member | Add member to etcd cluster  # noqa 301 305
   shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}"
   register: member_add_result
   until: member_add_result.rc == 0
@@ -24,7 +24,7 @@
         {%- endif -%}
       {%- endfor -%}
 
-- name: Join Member | Ensure member is in etcd cluster
+- name: Join Member | Ensure member is in etcd cluster  # noqa 306
   shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}"
   register: etcd_member_in_cluster
   changed_when: false
diff --git a/roles/etcd/tasks/upd_ca_trust.yml b/roles/etcd/tasks/upd_ca_trust.yml
index 1f9da04f26070eb46fedf96d0069924576a3fd63..d9b4d5ef8aa867d8e2593c2b1762b9de7696c295 100644
--- a/roles/etcd/tasks/upd_ca_trust.yml
+++ b/roles/etcd/tasks/upd_ca_trust.yml
@@ -23,14 +23,14 @@
     remote_src: true
   register: etcd_ca_cert
 
-- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS)
+- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS)  # noqa 503
   command: update-ca-certificates
   when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse"]
 
-- name: Gen_certs | update ca-certificates (RedHat)
+- name: Gen_certs | update ca-certificates (RedHat)  # noqa 503
   command: update-ca-trust extract
   when: etcd_ca_cert.changed and ansible_os_family == "RedHat"
 
-- name: Gen_certs | update ca-certificates (ClearLinux)
+- name: Gen_certs | update ca-certificates (ClearLinux)  # noqa 503
   command: clrtrust add "{{ ca_cert_path }}"
   when: etcd_ca_cert.changed and ansible_os_family == "ClearLinux"
diff --git a/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
index 860ca0e3ae17c8b237d8d1e4735340ddd3796e7f..4a3ebff4d24fc447113261ba3d842863cf585420 100644
--- a/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
+++ b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
@@ -32,7 +32,7 @@
   register: helmcert_master
   run_once: true
 
-- name: Gen_helm_tiller_certs | run cert generation script
+- name: Gen_helm_tiller_certs | run cert generation script  # noqa 301
   run_once: yes
   delegate_to: "{{ groups['kube-master'][0] }}"
   command: "{{ helm_script_dir }}/helm-make-ssl.sh -e {{ helm_home_dir }} -d {{ helm_tiller_cert_dir }}"
@@ -57,7 +57,7 @@
   with_items:
     - "{{ helm_client_certs }}"
 
-- name: Gen_helm_tiller_certs | Gather helm client certs
+- name: Gen_helm_tiller_certs | Gather helm client certs  # noqa 306
   # noqa 303 - tar is called intentionally here, but maybe this should be done with the slurp module
   shell: "tar cfz - -C {{ helm_home_dir }} {{ helm_client_certs|join(' ') }} | base64 --wrap=0"
   args:
@@ -85,7 +85,7 @@
     mode: "0600"
   when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
 
-- name: Gen_helm_tiller_certs | Unpack helm certs on masters
+- name: Gen_helm_tiller_certs | Unpack helm certs on masters  # noqa 306
   shell: "base64 -d < {{ helm_cert_tempfile.path }} | tar xz -C {{ helm_home_dir }}"
   no_log: true
   changed_when: false
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index a830f563d49ecee5037c788ece622431ca8127a4..5887ce3c895d3d8c392d8f4f438d193e20f0e1b3 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -52,7 +52,7 @@
     - helm_version is version('v3.0.0', '<')
 
 # FIXME: https://github.com/helm/helm/issues/6374
-- name: Helm | Install/upgrade helm
+- name: Helm | Install/upgrade helm  # noqa 306
   shell: >
     {{ bin_dir }}/helm init --tiller-namespace={{ tiller_namespace }}
     {% if helm_skip_refresh %} --skip-refresh{% endif %}
@@ -78,7 +78,7 @@
   environment: "{{ proxy_env }}"
 
 # FIXME: https://github.com/helm/helm/issues/4063
-- name: Helm | Force apply tiller overrides if necessary
+- name: Helm | Force apply tiller overrides if necessary  # noqa 306
   shell: >
     {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace={{ tiller_namespace }}
     {% if helm_skip_refresh %} --skip-refresh{% endif %}
@@ -108,7 +108,7 @@
     - helm_version is version('v3.0.0', '>=')
     - helm_stable_repo_url is defined
 
-- name: Make sure bash_completion.d folder exists
+- name: Make sure bash_completion.d folder exists  # noqa 503
   file:
     name: "/etc/bash_completion.d/"
     state: directory
@@ -116,7 +116,7 @@
     - ((helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed))
     - ansible_os_family in ["ClearLinux"]
 
-- name: Helm | Set up bash completion
+- name: Helm | Set up bash completion  # noqa 503
   shell: "umask 022 && {{ bin_dir }}/helm completion bash >/etc/bash_completion.d/helm.sh"
   when:
     - ((helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed))
diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
index 9528aa02dc05b74c60c8e9cdbe5462c48f8e2289..37f086849838b9f66524621492c099b811eb6a14 100644
--- a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: "calico upgrade complete"
+- name: "calico upgrade complete"  # noqa 305
   shell: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
   when:
     - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
index 347d1b4c2d2747a0451b37b300196ea63d6961a2..e9de24b52771cbe9d693802cc481ffb48649a6a5 100644
--- a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
+++ b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: Rotate Tokens | Get default token name
+- name: Rotate Tokens | Get default token name  # noqa 306
   shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets -o custom-columns=name:{.metadata.name} --no-headers | grep -m1 default-token"
   register: default_token
   changed_when: false
@@ -29,7 +29,7 @@
 
 # FIXME(mattymo): Exclude built in secrets that were automatically rotated,
 # instead of filtering manually
-- name: Rotate Tokens | Get all serviceaccount tokens to expire
+- name: Rotate Tokens | Get all serviceaccount tokens to expire  # noqa 306
   shell: >-
     {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets --all-namespaces
     -o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}'
diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml
index bbb1ce0e029532b4e0566501c00b8469db344b38..2baeadf234eed7ab0c9e11dd96f0757e0d0d8ad7 100644
--- a/roles/kubernetes/client/tasks/main.yml
+++ b/roles/kubernetes/client/tasks/main.yml
@@ -48,7 +48,7 @@
     timeout: 180
 
 # NOTE(mattymo): Please forgive this workaround
-- name: Generate admin kubeconfig with external api endpoint
+- name: Generate admin kubeconfig with external api endpoint  # noqa 302
   shell: >-
     mkdir -p {{ kube_config_dir }}/external_kubeconfig &&
     {{ bin_dir }}/kubeadm
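Rule 302 ("Using command rather than an argument to e.g. file") is triggered by the mkdir -p chained into this shell block; the directory creation is deliberately kept inline with the kubeadm call for the workaround noted above, so the noqa is the pragmatic choice. For the mkdir part on its own, the module-based form would be:

    - name: Ensure external kubeconfig directory exists
      file:
        path: "{{ kube_config_dir }}/external_kubeconfig"
        state: directory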
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index bf2c26879721984e3ab8b72666a02ca7978f415f..91bc35eb2998f1e3473e78cd09a83848bcfc9649 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -22,7 +22,7 @@
   delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true
 
-- name: Calculate kubeadm CA cert hash
+- name: Calculate kubeadm CA cert hash  # noqa 306
   shell: openssl x509 -pubkey -in {{ kube_cert_dir }}/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
   register: kubeadm_ca_hash
   when:
@@ -107,7 +107,7 @@
 
 # FIXME(mattymo): Need to point to localhost, otherwise masters will all point
 #                 incorrectly to first master, creating SPoF.
-- name: Update server field in kube-proxy kubeconfig
+- name: Update server field in kube-proxy kubeconfig  # noqa 306
   shell: >-
     {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get configmap kube-proxy -n kube-system -o yaml
     | sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g'
@@ -131,7 +131,7 @@
     group: root
     mode: "0644"
 
-- name: Restart all kube-proxy pods to ensure that they load the new configmap
+- name: Restart all kube-proxy pods to ensure that they load the new configmap  # noqa 305
   shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
   run_once: true
   delegate_to: "{{ groups['kube-master']|first }}"
@@ -157,7 +157,7 @@
 
 # FIXME(jjo): need to post-remove kube-proxy until https://github.com/kubernetes/kubeadm/issues/776
 # is fixed
-- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services
+- name: Delete kube-proxy daemonset if kube_proxy_remove is set, e.g. when kube_network_plugin provides proxy services  # noqa 305
   shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
   run_once: true
   delegate_to: "{{ groups['kube-master']|first }}"
diff --git a/roles/kubernetes/master/tasks/kubeadm-setup.yml b/roles/kubernetes/master/tasks/kubeadm-setup.yml
index d739fbc8f6cd970d20a2a4bc93b157d5e128d07c..1363206f6bda98455ef07a6dcb29150b5a155f08 100644
--- a/roles/kubernetes/master/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-setup.yml
@@ -47,7 +47,7 @@
   when:
     - old_apiserver_cert.stat.exists
 
-- name: kubeadm | Forcefully delete old static pods
+- name: kubeadm | Forcefully delete old static pods  # noqa 306
   shell: "docker ps -f name=k8s_{{ item }} -q | xargs --no-run-if-empty docker rm -f"
   with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   when:
diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml
index d6ce320ba033ce75d3457dd5e3151a8eb05c8948..06c3eb5250f95081efd6428ec2b0058ab3c8da66 100644
--- a/roles/kubernetes/master/tasks/pre-upgrade.yml
+++ b/roles/kubernetes/master/tasks/pre-upgrade.yml
@@ -8,7 +8,7 @@
   register: kube_apiserver_manifest_replaced
   when: etcd_secret_changed|default(false)
 
-- name: "Pre-upgrade | Delete master containers forcefully"
+- name: "Pre-upgrade | Delete master containers forcefully"  # noqa 306 503
   shell: "docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
   with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 21300e3adc4b8e56f559b75b7cc55be736214c74..473aaf7ebb2c0eb0f60e4dfdd451bebcf32f7e8f 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -45,7 +45,7 @@
   tags:
     - kube-proxy
 
-- name: Verify if br_netfilter module exists
+- name: Verify if br_netfilter module exists  # noqa 305
   shell: "modinfo br_netfilter"
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH's conservative path management
diff --git a/roles/kubernetes/node/tasks/pre_upgrade.yml b/roles/kubernetes/node/tasks/pre_upgrade.yml
index 78a39567c15f472611c13381877377ab5575f8f5..918edfac5aa1873f78264d494f24ce9253230be2 100644
--- a/roles/kubernetes/node/tasks/pre_upgrade.yml
+++ b/roles/kubernetes/node/tasks/pre_upgrade.yml
@@ -1,5 +1,5 @@
 ---
-- name: "Pre-upgrade | check if kubelet container exists"
+- name: "Pre-upgrade | check if kubelet container exists"  # noqa 306
   shell: >-
     {% if container_manager in ['crio', 'docker'] %}
     docker ps -af name=kubelet | grep kubelet
diff --git a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml
index fd4cec362bb410fd296b22b4defe91f6634d2f2d..097ba1d73b3bdcb6e15c2f3f3e85c4db577f3d56 100644
--- a/roles/kubernetes/preinstall/handlers/main.yml
+++ b/roles/kubernetes/preinstall/handlers/main.yml
@@ -29,7 +29,7 @@
     - Preinstall | reload kubelet
   when: is_fedora_coreos
 
-- name: Preinstall | reload NetworkManager
+- name: Preinstall | reload NetworkManager  # noqa 303
   command: systemctl restart NetworkManager.service
   when: is_fedora_coreos
 
diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
index 987a4643a54370dfe2834b7318c83ac394b559c7..599289d90f682e8434bd185be95aae3c56036285 100644
--- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
@@ -158,7 +158,7 @@
   when:
     - kube_network_plugin == 'calico'
 
-- name: "Get current version of calico cluster version"
+- name: "Get current version of calico cluster version"  # noqa 306
   shell: "{{ bin_dir }}/calicoctl.sh version  | grep 'Cluster Version:' | awk '{ print $3}'"
   register: calico_version_on_server
   run_once: yes
diff --git a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
index 0a4cd9ef3a582f0821e594c54fd777bc7df2eac8..a488f2fe0ea47e1867a7a27b380e00b69aca7f38 100644
--- a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
@@ -24,14 +24,14 @@
   set_fact:
     is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}"
 
-- name: check resolvconf
+- name: check resolvconf  # noqa 305
   shell: which resolvconf
   register: resolvconf
   failed_when: false
   changed_when: false
   check_mode: no
 
-- name: check systemd-resolved
+- name: check systemd-resolved  # noqa 303
   command: systemctl is-active systemd-resolved
   register: systemd_resolved_enabled
   failed_when: false
diff --git a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
index 44b99a5718de6891eb3ab4112c1b7c3a3255f1a5..2c3546e222c209009ba7c14ea31b8cef340c0034 100644
--- a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
+++ b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
@@ -1,5 +1,5 @@
 ---
-- name: Update package management cache (zypper) - SUSE
+- name: Update package management cache (zypper) - SUSE  # noqa 305
   shell: zypper -n --gpg-auto-import-keys ref
   register: make_cache_output
   until: make_cache_output is succeeded
diff --git a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
index b00c576eda82fbbfcff77a7f160ebc5fd736bb4a..69aa6518609c6d3e6ae6e15d49bc1553411ae20a 100644
--- a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
+++ b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
@@ -20,12 +20,12 @@
   changed_when: False
   register: fs_type
 
-- name: run growpart
+- name: run growpart  # noqa 503
   command: growpart /dev/sda 1
   when: growpart_needed.changed
   environment:
     LC_ALL: C
 
-- name: run xfs_growfs
+- name: run xfs_growfs  # noqa 503
   command: xfs_growfs /dev/sda1
   when: growpart_needed.changed and 'XFS' in fs_type.stdout
diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml
index 9507a9323bde6b94ea466740ce18a2646f004b83..c6f323b23a52a530141e4bce1f0f0a911906d4ff 100644
--- a/roles/kubernetes/tokens/tasks/gen_tokens.yml
+++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml
@@ -34,7 +34,7 @@
   delegate_to: "{{ groups['kube-master'][0] }}"
   when: gen_tokens|default(false)
 
-- name: Gen_tokens | Get list of tokens from first master
+- name: Gen_tokens | Get list of tokens from first master  # noqa 305
   shell: "(find {{ kube_token_dir }} -maxdepth 1 -type f)"
   register: tokens_list
   check_mode: no
@@ -42,7 +42,7 @@
   run_once: true
   when: sync_tokens|default(false)
 
-- name: Gen_tokens | Gather tokens
+- name: Gen_tokens | Gather tokens  # noqa 306
   shell: "tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0"
   args:
     warn: false
@@ -52,7 +52,7 @@
   run_once: true
   when: sync_tokens|default(false)
 
-- name: Gen_tokens | Copy tokens on masters
+- name: Gen_tokens | Copy tokens on masters  # noqa 306
   shell: "echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /"
   when:
     - inventory_hostname in groups['kube-master']
diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml
index 5b80cf1ac41caaabfb23ce17b5c775fdf50bd5ab..4aa78f61efead417466b9544d2fc5f4f3e5fce1c 100644
--- a/roles/network_plugin/calico/rr/tasks/main.yml
+++ b/roles/network_plugin/calico/rr/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Calico-rr | Pre-upgrade tasks
   include_tasks: pre.yml
 
-- name: Calico-rr | Fetch current node object
+- name: Calico-rr | Fetch current node object  # noqa 301
   command: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }} -ojson"
   register: calico_rr_node
   until: calico_rr_node is succeeded
@@ -15,12 +15,12 @@
       {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp':
       { 'routeReflectorClusterID': cluster_id }}}, recursive=True) }}
 
-- name: Calico-rr | Configure route reflector
+- name: Calico-rr | Configure route reflector  # noqa 301 305
   shell: "{{ bin_dir }}/calicoctl.sh replace -f-"
   args:
     stdin: "{{ calico_rr_node_patched | to_json }}"
 
-- name: Calico-rr | Set label for route reflector
+- name: Calico-rr | Set label for route reflector  # noqa 301
   command: >-
     {{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }}
     'i-am-a-route-reflector=true' --overwrite
diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml
index 99888e21616116a392a8c7968aef8764aafb24e3..dc92912fc42818b7687c3145a52502cff3b9848a 100644
--- a/roles/network_plugin/calico/tasks/check.yml
+++ b/roles/network_plugin/calico/tasks/check.yml
@@ -37,7 +37,7 @@
   when:
     - "calico_vxlan_mode in ['Always', 'CrossSubnet']"
 
-- name: "Get current version of calico cluster version"
+- name: "Get current version of calico cluster version"  # noqa 306
   shell: "{{ bin_dir }}/calicoctl.sh version  | grep 'Cluster Version:' | awk '{ print $3}'"
   register: calico_version_on_server
   run_once: yes
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml
index 77aeba6ef6fd4ed8c93481befef534fb79d945ac..85a77f7e3082b3a98c8559db199899afb33f61b0 100644
--- a/roles/network_plugin/calico/tasks/install.yml
+++ b/roles/network_plugin/calico/tasks/install.yml
@@ -6,7 +6,7 @@
     mode: 0755
     remote_src: yes
 
-- name: Calico | Check if host has NetworkManager
+- name: Calico | Check if host has NetworkManager  # noqa 303
   command: systemctl show NetworkManager
   register: nm_check
   failed_when: false
@@ -84,7 +84,7 @@
   run_once: true
   when: calico_datastore == "etcd"
 
-- name: Calico | Check if calico network pool has already been configured
+- name: Calico | Check if calico network pool has already been configured  # noqa 306
   shell: >
     {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l
   register: calico_conf
@@ -131,7 +131,7 @@
   loop_control:
     label: "{{ item.item.file }}"
 
-- name: Calico | Configure calico network pool (version < v3.3.0)
+- name: Calico | Configure calico network pool (version < v3.3.0)  # noqa 306
   shell: >
     echo "
       { "kind": "IPPool",
@@ -149,7 +149,7 @@
     - 'calico_conf.stdout == "0"'
     - calico_version is version("v3.3.0", "<")
 
-- name: Calico | Configure calico network pool (version >= v3.3.0)
+- name: Calico | Configure calico network pool (version >= v3.3.0)  # noqa 306
   shell: >
     echo "
       { "kind": "IPPool",
@@ -176,7 +176,7 @@
     - inventory_hostname in groups['k8s-cluster']
   run_once: yes
 
-- name: Calico | Set global as_num
+- name: Calico | Set global as_num  # noqa 306
   shell: >
     echo '
     { "kind": "BGPConfiguration",
@@ -192,7 +192,7 @@
   when:
     - inventory_hostname == groups['kube-master'][0]
 
-- name: Calico | Configure peering with router(s) at global scope
+- name: Calico | Configure peering with router(s) at global scope  # noqa 306
   shell: >
     echo '{
     "apiVersion": "projectcalico.org/v3",
@@ -214,7 +214,7 @@
     - inventory_hostname == groups['kube-master'][0]
     - peer_with_router|default(false)
 
-- name: Calico | Configure peering with route reflectors at global scope
+- name: Calico | Configure peering with route reflectors at global scope  # noqa 306
   shell: |
     echo '{
     "apiVersion": "projectcalico.org/v3",
@@ -236,7 +236,7 @@
     - inventory_hostname == groups['kube-master'][0]
     - peer_with_calico_rr|default(false)
 
-- name: Calico | Configure route reflectors to peer with each other
+- name: Calico | Configure route reflectors to peer with each other  # noqa 306
   shell: >
     echo '{
     "apiVersion": "projectcalico.org/v3",
@@ -309,7 +309,7 @@
     - inventory_hostname not in groups['kube-master']
     - calico_datastore == "kdd"
 
-- name: Calico | Configure node asNumber for per node peering
+- name: Calico | Configure node asNumber for per node peering  # noqa 306
   shell: >
     echo '{
     "apiVersion": "projectcalico.org/v3",
@@ -333,7 +333,7 @@
     - local_as is defined
     - groups['calico-rr'] | default([]) | length == 0
 
-- name: Calico | Configure peering with router(s) at node scope
+- name: Calico | Configure peering with router(s) at node scope  # noqa 306
   shell: >
     echo '{
     "apiVersion": "projectcalico.org/v3",
diff --git a/roles/network_plugin/calico/tasks/pre.yml b/roles/network_plugin/calico/tasks/pre.yml
index e798142f315fc3f17452bb2748c4d22539cd6efe..aaae21bcdd75b639f37740bc113028c94e430e5e 100644
--- a/roles/network_plugin/calico/tasks/pre.yml
+++ b/roles/network_plugin/calico/tasks/pre.yml
@@ -1,5 +1,5 @@
 ---
-- name: Calico | Get kubelet hostname
+- name: Calico | Get kubelet hostname  # noqa 306
   shell: >-
     {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
     | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1
diff --git a/roles/network_plugin/calico/tasks/reset.yml b/roles/network_plugin/calico/tasks/reset.yml
index 1cdab1262386ea822b94d8f450bd761fd46703b7..0135350720db0013ffe3587cf842a4fcdf297c36 100644
--- a/roles/network_plugin/calico/tasks/reset.yml
+++ b/roles/network_plugin/calico/tasks/reset.yml
@@ -8,11 +8,11 @@
   command: ip link del dummy0
   when: dummy0.stat.exists
 
-- name: reset | get remaining routes set by bird
+- name: reset | get remaining routes set by bird  # noqa 301
   command: ip route show proto bird
   register: bird_routes
 
-- name: reset | remove remaining routes set by bird
+- name: reset | remove remaining routes set by bird  # noqa 301
   command: "ip route del {{ bird_route }} proto bird"
   with_items: "{{ bird_routes.stdout_lines }}"
   loop_control:
diff --git a/roles/network_plugin/calico/tasks/upgrade.yml b/roles/network_plugin/calico/tasks/upgrade.yml
index a4b7cffd65ce37c653d40a9818ab0fe600aa2946..0dceac840588226dfbbd0f320098673a0b1a649e 100644
--- a/roles/network_plugin/calico/tasks/upgrade.yml
+++ b/roles/network_plugin/calico/tasks/upgrade.yml
@@ -16,11 +16,11 @@
     - "etcdv2"
     - "etcdv3"
 
-- name: "Tests data migration (dry-run)"
+- name: "Tests data migration (dry-run)"  # noqa 301 305
   shell: "{{ bin_dir }}/calico-upgrade dry-run --output-dir=/tmp --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
   register: calico_upgrade_test_data
   failed_when: '"Successfully" not in calico_upgrade_test_data.stdout'
 
-- name: "If test migration is success continue with calico data real migration"
+- name: "If test migration is success continue with calico data real migration"  # noqa 301 305
   shell: "{{ bin_dir }}/calico-upgrade start --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml --output-dir=/tmp/calico_upgrade"
   register: calico_upgrade_migration_data
diff --git a/roles/network_plugin/contiv/tasks/pre-reset.yml b/roles/network_plugin/contiv/tasks/pre-reset.yml
index a811d59213f778c5283f6277003cf8f30779ef3b..f7e66f01ff4aaf6ad632270128ce22f3b286e2d5 100644
--- a/roles/network_plugin/contiv/tasks/pre-reset.yml
+++ b/roles/network_plugin/contiv/tasks/pre-reset.yml
@@ -21,7 +21,7 @@
     - contiv_kubectl.stat.exists
     - inventory_hostname == groups['kube-master'][0]
 
-- name: reset | Copy contiv temporary cleanup script
+- name: reset | Copy contiv temporary cleanup script  # noqa 404
   copy:
     src: ../files/contiv-cleanup.sh  # Not in role_path so we must trick...
     dest: /opt/cni/bin/cleanup
@@ -31,7 +31,7 @@
   when:
     - contiv_kubectl.stat.exists
 
-- name: reset | Lay down contiv cleanup template
+- name: reset | Lay down contiv cleanup template  # noqa 404
   template:
     src: ../templates/contiv-cleanup.yml.j2  # Not in role_path so we must trick...
     dest: "{{ kube_config_dir }}/contiv-cleanup.yml"  # kube_config_dir is used here as contiv_config_dir is not necessarily set at reset
diff --git a/roles/network_plugin/kube-ovn/tasks/main.yml b/roles/network_plugin/kube-ovn/tasks/main.yml
index 308b1c625214822ffaa54c7d02b54389d4dd4f86..b254dd99721cb1ba806eb3ac89aad6ce223f32fb 100644
--- a/roles/network_plugin/kube-ovn/tasks/main.yml
+++ b/roles/network_plugin/kube-ovn/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: Kube-OVN | Label ovn-db node
+- name: Kube-OVN | Label ovn-db node  # noqa 305
   shell: >-
     {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} kube-ovn/role=master
   when:
diff --git a/roles/network_plugin/macvlan/tasks/main.yml b/roles/network_plugin/macvlan/tasks/main.yml
index 751c3471623dfe35effa43c7cdda24a4013af589..3608a617fc8119bd43e9f05b93b48ade4d881b5f 100644
--- a/roles/network_plugin/macvlan/tasks/main.yml
+++ b/roles/network_plugin/macvlan/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: Macvlan | Retrieve Pod Cidr
+- name: Macvlan | Retrieve Pod Cidr  # noqa 301
   command: "{{ bin_dir }}/kubectl get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'"
   register: node_pod_cidr_cmd
   delegate_to: "{{ groups['kube-master'][0] }}"
@@ -8,7 +8,7 @@
   set_fact:
     node_pod_cidr={{ node_pod_cidr_cmd.stdout }}
 
-- name: Macvlan | Retrieve default gateway network interface
+- name: Macvlan | Retrieve default gateway network interface  # noqa 301
   become: false
   raw: ip -4 route list 0/0 | sed 's/.*dev \([[:alnum:]]*\).*/\1/'
   register: node_default_gateway_interface_cmd
diff --git a/roles/recover_control_plane/etcd/tasks/main.yml b/roles/recover_control_plane/etcd/tasks/main.yml
index 64cac81dad7182472e04fa756f58373cfd8d32b1..55874d54309cb7cd32cfb438104f79ce00f0cd7c 100644
--- a/roles/recover_control_plane/etcd/tasks/main.yml
+++ b/roles/recover_control_plane/etcd/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: Get etcd endpoint health
+- name: Get etcd endpoint health  # noqa 305
   shell: "{{ bin_dir }}/etcdctl endpoint health"
   register: etcd_endpoint_health
   ignore_errors: true
@@ -57,7 +57,7 @@
     - groups['broken_etcd']
     - "item.rc != 0 and not 'No such file or directory' in item.stderr"
 
-- name: Get etcd cluster members
+- name: Get etcd cluster members  # noqa 305
   shell: "{{ bin_dir }}/etcdctl member list"
   register: member_list
   changed_when: false
@@ -73,7 +73,7 @@
     - not healthy
     - has_quorum
 
-- name: Remove broken cluster members
+- name: Remove broken cluster members  # noqa 305
   shell: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}"
   environment:
     ETCDCTL_API: 3
diff --git a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
index dc101180584ccbc37f9f4d5ad349eeae51961bf0..ff2c726fdd311def724fd33a5125b10536bf0584 100644
--- a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
+++ b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
@@ -1,5 +1,5 @@
 ---
-- name: Save etcd snapshot
+- name: Save etcd snapshot  # noqa 305
   shell: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db"
   environment:
     - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
@@ -25,7 +25,7 @@
     path: "{{ etcd_data_dir }}"
     state: absent
 
-- name: Restore etcd snapshot
+- name: Restore etcd snapshot  # noqa 301 305
   shell: "{{ bin_dir }}/etcdctl snapshot restore /tmp/snapshot.db --name {{ etcd_member_name }} --initial-cluster {{ etcd_member_name }}={{ etcd_peer_url }} --initial-cluster-token k8s_etcd --initial-advertise-peer-urls {{ etcd_peer_url }} --data-dir {{ etcd_data_dir }}"
   environment:
     - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
diff --git a/roles/recover_control_plane/master/tasks/main.yml b/roles/recover_control_plane/master/tasks/main.yml
index 71a0941682bfbd538ae71d7ce34f2ace072c32c2..9cc7c33d60dcce75a53d1f6f433c52b8b3d6a328 100644
--- a/roles/recover_control_plane/master/tasks/main.yml
+++ b/roles/recover_control_plane/master/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: Wait for apiserver
+- name: Wait for apiserver  # noqa 305
   shell: "{{ bin_dir }}/kubectl get nodes"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
@@ -10,7 +10,7 @@
   changed_when: false
   when: groups['broken_kube-master']
 
-- name: Delete broken kube-master nodes from cluster
+- name: Delete broken kube-master nodes from cluster  # noqa 305
   shell: "{{ bin_dir }}/kubectl delete node {{ item }}"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index 37aac0df2398337a8e6fdd427fb966840bdf6fbe..c4660ef87c703a540217dbaa62afba59abceafbb 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: Delete node
+- name: Delete node  # noqa 301
   command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube-master']|first }}"
   ignore_errors: yes
\ No newline at end of file
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
index f287aa3dd95ce2f6ffddac91c7a92b4b1a5ec926..32421c1a38d44219aa9852c798e11ded935e6d25 100644
--- a/roles/remove-node/pre-remove/tasks/main.yml
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: cordon-node | Mark all nodes as unschedulable before drain
+- name: cordon-node | Mark all nodes as unschedulable before drain  # noqa 301
   command: >-
     {{ bin_dir }}/kubectl cordon {{ hostvars[item]['kube_override_hostname']|default(item) }}
   with_items:
@@ -9,7 +9,7 @@
   run_once: true
   ignore_errors: yes
 
-- name: remove-node | Drain node except daemonsets resource
+- name: remove-node | Drain node except daemonsets resource  # noqa 301
   command: >-
     {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf drain
       --force
diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml b/roles/remove-node/remove-etcd-node/tasks/main.yml
index ffd95a4c8c8fba1f0d0a484151f1635d6c325753..21a02660651b20b73c6e566072e474eb070e5b26 100644
--- a/roles/remove-node/remove-etcd-node/tasks/main.yml
+++ b/roles/remove-node/remove-etcd-node/tasks/main.yml
@@ -34,7 +34,7 @@
   delegate_to: "{{ groups['etcd']|first }}"
   when: inventory_hostname in groups['etcd']
 
-- name: Remove etcd member from cluster
+- name: Remove etcd member from cluster  # noqa 305
   shell: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
   register: etcd_member_in_cluster
   changed_when: false
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 5fd98fd6f917bcc3cc3b9253b16075591e6aefdd..4a9b13df94eed4a610412b9065168caf2efd23c3 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -41,12 +41,12 @@
   tags:
     - docker
 
-- name: reset | systemctl daemon-reload
+- name: reset | systemctl daemon-reload  # noqa 503
   systemd:
     daemon_reload: true
   when: services_removed.changed or docker_dropins_removed.changed
 
-- name: reset | remove all containers
+- name: reset | remove all containers  # noqa 306
   shell: "{{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"
   register: remove_all_containers
   retries: 4
@@ -56,7 +56,7 @@
   tags:
     - docker
 
-- name: reset | restart docker if needed
+- name: reset | restart docker if needed  # noqa 503
   service:
     name: docker
     state: restarted
@@ -64,7 +64,7 @@
   tags:
     - docker
 
-- name: reset | stop all cri containers
+- name: reset | stop all cri containers  # noqa 306
   shell: "crictl ps -aq | xargs -r crictl -t 60s stop"
   register: remove_all_cri_containers
   retries: 5
@@ -75,7 +75,7 @@
     - containerd
   when: container_manager in ["crio", "containerd"]
 
-- name: reset | remove all cri containers
+- name: reset | remove all cri containers  # noqa 306
   shell: "crictl ps -aq | xargs -r crictl -t 60s rm"
   register: remove_all_cri_containers
   retries: 5
@@ -86,7 +86,7 @@
     - containerd
   when: container_manager in ["crio", "containerd"] and deploy_container_engine|default(true)
 
-- name: reset | stop all cri pods
+- name: reset | stop all cri pods  # noqa 306
   shell: "crictl pods -q | xargs -r crictl -t 60s stopp"
   register: remove_all_cri_containers
   retries: 5
@@ -97,7 +97,7 @@
     - containerd
   when: container_manager in ["crio", "containerd"]
 
-- name: reset | remove all cri pods
+- name: reset | remove all cri pods  # noqa 306
   shell: "crictl pods -q | xargs -r crictl -t 60s rmp"
   register: remove_all_cri_containers
   retries: 5
@@ -130,7 +130,7 @@
   tags:
     - services
 
-- name: reset | gather mounted kubelet dirs
+- name: reset | gather mounted kubelet dirs  # noqa 301 306
   shell: mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
   args:
     warn: false
@@ -139,7 +139,7 @@
   tags:
     - mounts
 
-- name: reset | unmount kubelet dirs
+- name: reset | unmount kubelet dirs  # noqa 301
   command: umount -f {{ item }}
   with_items: "{{ mounted_dirs.stdout_lines }}"
   register: umount_dir
@@ -161,7 +161,7 @@
   tags:
     - iptables
 
-- name: Clear IPVS virtual server table
+- name: Clear IPVS virtual server table  # noqa 305
   shell: "ipvsadm -C"
   when:
     - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s-cluster']
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index 7524e349071c4f9c4aa85a6033bdc467d0000b9a..8fd3e5c032c86b28ac7f2c793e195a0ce41c3567 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -112,7 +112,7 @@
           {%- endfor %}
       when: "'etcd' in groups"
 
-    - name: Storing commands output
+    - name: Storing commands output  # noqa 306
       shell: "{{ item.cmd }} 2>&1 | tee {{ item.name }}"
       failed_when: false
       with_items: "{{ commands }}"
diff --git a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
index aae85e4092fefb6d09e28989afb6c9a63b40e790..08f26694af5745de46c8143622ccdb46d9a8122e 100644
--- a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
+++ b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
@@ -34,7 +34,7 @@
   when:
     - item.value.converted|bool
 
-- name: Resize images
+- name: Resize images  # noqa 301
   command: qemu-img resize {{ images_dir }}/{{ item.key }}.qcow2 +8G
   with_dict:
     - "{{ images }}"
@@ -45,15 +45,15 @@
     src: Dockerfile
     dest: "{{ images_dir }}/Dockerfile"
 
-- name: Create docker images for each OS
+- name: Create docker images for each OS  # noqa 301
   command: docker build -t {{ registry }}/vm-{{ item.key }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
   with_dict:
     - "{{ images }}"
 
-- name: docker login
+- name: docker login  # noqa 301
   command: docker login -u="{{ docker_user }}" -p="{{ docker_password }}" "{{ docker_host }}"
 
-- name: docker push image
+- name: docker push image  # noqa 301
   command: docker push {{ registry }}/vm-{{ item.key }}:latest
   with_dict:
     - "{{ images }}"
diff --git a/tests/cloud_playbooks/create-aws.yml b/tests/cloud_playbooks/create-aws.yml
index eb33d9838ad8224761daa249cd4e168214c2fd12..52317e794c6c237f03673c89536510e25408cfdb 100644
--- a/tests/cloud_playbooks/create-aws.yml
+++ b/tests/cloud_playbooks/create-aws.yml
@@ -18,7 +18,7 @@
       instance_tags: "{{ aws.tags }}"
     register: ec2
 
-  - name: Template the inventory
+  - name: Template the inventory  # noqa 404
     template:
       src: ../templates/inventory-aws.j2
       dest: "{{ inventory_path }}"
diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml
index 37fbafbd6106ff77ef50a4ee88f99cc6d20f34b2..5d41f714df6326648ffdb4335e7631027443a209 100644
--- a/tests/cloud_playbooks/create-do.yml
+++ b/tests/cloud_playbooks/create-do.yml
@@ -86,7 +86,7 @@
         msg: "{{ droplets }}, {{ inventory_path }}"
       when: state == 'present'
 
-    - name: Template the inventory
+    - name: Template the inventory  # noqa 404
       template:
         src: ../templates/inventory-do.j2
         dest: "{{ inventory_path }}"
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index 2664810799bb83c2c327e182cabb9a8511303e54..57e5d1d4116fd3c2f47f877132b7e8f32cce09e5 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -49,7 +49,7 @@
       add_host: hostname={{ item.public_ip }} groupname="waitfor_hosts"
       with_items: '{{ gce.instance_data }}'
 
-    - name: Template the inventory
+    - name: Template the inventory  # noqa 404
       template:
         src: ../templates/inventory-gce.j2
         dest: "{{ inventory_path }}"
@@ -60,7 +60,7 @@
         state: directory
       when: mode in ['scale', 'separate-scale', 'ha-scale']
 
-    - name: Template fake hosts group vars
+    - name: Template fake hosts group vars  # noqa 404
       template:
         src: ../templates/fake_hosts.yml.j2
         dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"
diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
index 939d432a64dabe3a7104610edcf5776110a1a737..53edd09683434d1c061a00dc99fcc5fa4c3ed68c 100644
--- a/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
+++ b/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
@@ -29,7 +29,7 @@
   loop_control:
     index_var: vm_id
 
-- name: Wait for vms to have ipaddress assigned
+- name: Wait for VMs to have IP addresses assigned  # noqa 301 306
   shell: "kubectl get vmis -n {{ test_name }} instance-{{ vm_id }} -o json | jq '.status.interfaces[].ipAddress' | tr -d '\"'"
   register: vm_ips
   loop: "{{ range(1, vm_count|int + 1, 1) | list }}"
diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
index dc66e2db7c47f74d9f2d1c567d2233302ee3c77c..a37d4ed1454522a917c6f7b194ec0f4fb535b58a 100644
--- a/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
+++ b/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
@@ -16,7 +16,7 @@
     state: absent
     name: "{{ test_name }}"
 
-- name: Wait for namespace {{ test_name }} to be fully deleted
+- name: Wait for namespace {{ test_name }} to be fully deleted  # noqa 305
   shell: kubectl get ns {{ test_name }}
   register: delete_namespace
   failed_when:
diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml
index f1e3cbaca52ba8d6567e6b113a991f6605aac951..6e6457ba5256950d897aa7f7b5ffa853007db96e 100644
--- a/tests/cloud_playbooks/upload-logs-gcs.yml
+++ b/tests/cloud_playbooks/upload-logs-gcs.yml
@@ -7,7 +7,7 @@
     expire_days: 2
 
   tasks:
-    - name: Generate uniq bucket name prefix
+    - name: Generate unique bucket name prefix  # noqa 301
       raw: date +%Y%m%d
       register: out
 
@@ -52,7 +52,7 @@
       no_log: True
       failed_when: false
 
-    - name: Apply the lifecycle rules
+    - name: Apply the lifecycle rules  # noqa 301
       command: "{{ dir }}/google-cloud-sdk/bin/gsutil lifecycle set {{ dir }}/gcs_life.json gs://{{ test_name }}"
       environment:
         BOTO_CONFIG: "{{ dir }}/.boto"
diff --git a/tests/testcases/015_check-nodes-ready.yml b/tests/testcases/015_check-nodes-ready.yml
index be8370cc3b326cb2f6aef3152664977b89c82ee0..f2cfd2eba2e3493fbbaff39120d55827bb5566dd 100644
--- a/tests/testcases/015_check-nodes-ready.yml
+++ b/tests/testcases/015_check-nodes-ready.yml
@@ -15,7 +15,7 @@
   - import_role:
       name: cluster-dump
 
-  - name: Check kubectl output
+  - name: Check kubectl output  # noqa 301 305
     shell: "{{ bin_dir }}/kubectl get nodes"
     register: get_nodes
     no_log: true
@@ -23,7 +23,7 @@
   - debug:
       msg: "{{ get_nodes.stdout.split('\n') }}"
 
-  - name: Check that all nodes are running and ready
+  - name: Check that all nodes are running and ready  # noqa 301 305
     shell: "{{ bin_dir }}/kubectl get nodes --no-headers -o yaml"
     register: get_nodes_yaml
     until:
diff --git a/tests/testcases/020_check-pods-running.yml b/tests/testcases/020_check-pods-running.yml
index 9679be5fc74f5a83aeb199f1579e4429bdcea2f6..8cf95f114228d379895145f31d82e3c122f5ed28 100644
--- a/tests/testcases/020_check-pods-running.yml
+++ b/tests/testcases/020_check-pods-running.yml
@@ -15,7 +15,7 @@
   - import_role:
       name: cluster-dump
 
-  - name: Check kubectl output
+  - name: Check kubectl output  # noqa 301 305
     shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
     register: get_pods
     no_log: true
@@ -23,7 +23,7 @@
   - debug:
       msg: "{{ get_pods.stdout.split('\n') }}"
 
-  - name: Check that all pods are running and ready
+  - name: Check that all pods are running and ready  # noqa 301 305
     shell: "{{ bin_dir }}/kubectl get pods --all-namespaces --no-headers -o yaml"
     register: run_pods_log
     until:
@@ -36,7 +36,7 @@
     failed_when: false
     no_log: true
 
-  - name: Check kubectl output
+  - name: Check kubectl output  # noqa 301 305
     shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
     register: get_pods
     no_log: true
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index bee470ef753157ca1a06fee1532e40bcf723506b..8887e38fedcc1e7e09b7ff8b6ec1b6014f4376b8 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -15,10 +15,10 @@
       bin_dir: "/usr/local/bin"
     when: not ansible_os_family in ["CoreOS", "Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk"]
 
-  - name: Create test namespace
+  - name: Create test namespace  # noqa 301 305
     shell: "{{ bin_dir }}/kubectl create namespace test"
 
-  - name: Run 2 busybox pods in test ns
+  - name: Run 2 busybox pods in test ns  # noqa 301 305
     shell: "{{ bin_dir }}/kubectl run {{ item }} --image={{ test_image_repo }}:{{ test_image_tag }} --namespace test --command -- tail -f /dev/null"
     loop:
     - busybox1
@@ -27,7 +27,7 @@
   - import_role:
       name: cluster-dump
 
-  - name: Check that all pods are running and ready
+  - name: Check that all pods are running and ready  # noqa 301 305
     shell: "{{ bin_dir }}/kubectl get pods --namespace test --no-headers -o yaml"
     register: run_pods_log
     until:
@@ -40,7 +40,7 @@
     failed_when: false
     no_log: true
 
-  - name: Get pod names
+  - name: Get pod names  # noqa 301 305
     shell: "{{ bin_dir }}/kubectl get pods -n test -o json"
     register: pods
     no_log: true
@@ -49,19 +49,19 @@
       msg: "{{ pods.stdout.split('\n') }}"
     failed_when: not run_pods_log is success
 
-  - name: Get hostnet pods
+  - name: Get hostnet pods  # noqa 301
     command: "{{ bin_dir }}/kubectl get pods -n test -o
             jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
     register: hostnet_pods
     no_log: true
 
-  - name: Get running pods
+  - name: Get running pods  # noqa 301
     command: "{{ bin_dir }}/kubectl get pods -n test -o
             jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
     register: running_pods
     no_log: true
 
-  - name: Check kubectl output
+  - name: Check kubectl output  # noqa 301 305
     shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
     register: get_pods
     no_log: true
@@ -89,7 +89,7 @@
     - item in pods_running
     with_items: "{{ pod_ips }}"
 
-  - name: Ping between pods is working
+  - name: Ping between pods is working  # noqa 305
     shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
     when:
     - not item[0] in pods_hostnet
@@ -98,7 +98,7 @@
     - "{{ pod_names }}"
     - "{{ pod_ips }}"
 
-  - name: Ping between hostnet pods is working
+  - name: Ping between hostnet pods is working  # noqa 305
     shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
     when:
     - item[0] in pods_hostnet
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index e6ea13a24b1ffa35c6434308c8049118e1a6dbd6..541235255bba11ea1aa3a2febf42cd5757278cbf 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -1,7 +1,7 @@
 ---
 - hosts: kube-node
   tasks:
-    - name: Test tunl0 routes
+    - name: Test tunl0 routes  # noqa 306
       shell: "! /sbin/ip ro | grep '/26 via' | grep -v tunl0"
       when:
         - (ipip|default(true) or cloud_provider is defined)
@@ -14,7 +14,7 @@
     netchecker_port: 31081
 
   tasks:
-    - name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282)
+    - name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282)  # noqa 305
       shell: "ethtool --offload flannel.1 rx off tx off"
       ignore_errors: true
       when:
@@ -33,7 +33,7 @@
     - import_role:
         name: cluster-dump
 
-    - name: Wait for netchecker server
+    - name: Wait for netchecker server  # noqa 306
       shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep ^netchecker-server"
       register: ncs_pod
       until: ncs_pod.stdout.find('Running') != -1
@@ -41,7 +41,7 @@
       delay: 10
       when: inventory_hostname == groups['kube-master'][0]
 
-    - name: Wait for netchecker agents
+    - name: Wait for netchecker agents  # noqa 306
       shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep '^netchecker-agent-.*Running'"
       register: nca_pod
       until: nca_pod.stdout_lines|length >= groups['k8s-cluster']|intersect(ansible_play_hosts)|length * 2
@@ -214,7 +214,7 @@
         - inventory_hostname == groups['kube-master'][0]
         - kube_network_plugin_multus|default(false)|bool
 
-    - name: Check secondary macvlan interface
+    - name: Check secondary macvlan interface  # noqa 305
       shell: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
       register: output
       until: output.rc == 0
diff --git a/tests/testcases/roles/cluster-dump/tasks/main.yml b/tests/testcases/roles/cluster-dump/tasks/main.yml
index e1d5d35a6bc0da6c171b5c5f584c80c0c9096424..bae50b87da9f1dec2fbf160ee52bb597de6fbc39 100644
--- a/tests/testcases/roles/cluster-dump/tasks/main.yml
+++ b/tests/testcases/roles/cluster-dump/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: Generate dump folder
+- name: Generate dump folder  # noqa 305
   shell: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump"
   no_log: true
   when: inventory_hostname in groups['kube-master']