diff --git a/.ansible-lint b/.ansible-lint
index 9ea65c48b59342fa074dba750fb3a19b759a0fcb..ec6a9e0c34ffa9ed8dcf9c5b74a99e6e548c75b0 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -7,24 +7,11 @@ skip_list:
 
   # These rules are intentionally skipped:
   #
-  # [E204]: "Lines should be no longer than 160 chars"
-  # This could be re-enabled with a major rewrite in the future.
-  # For now, there's not enough value gain from strictly limiting line length.
-  # (Disabled in May 2019)
-  - '204'
-
-  # [E701]: "meta/main.yml should contain relevant info"
-  # Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
-  # While it can be useful to have these metadata available, they are also available in the existing documentation.
-  # (Disabled in May 2019)
-  - '701'
-
   # [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
   # Meta roles in Kubespray don't need proper names
   # (Disabled in June 2021)
   - 'role-name'
 
-  - 'experimental'
   # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
   # In Kubespray we use variables that use camelCase to match their k8s counterparts
   # (Disabled in June 2021)
@@ -65,10 +52,6 @@ skip_list:
   # Disable run-once check with free strategy
   # (Disabled in June 2023 after ansible upgrade; FIXME)
   - 'run-once[task]'
-
-  # Disable outdated-tag check
-  # (Disabled in June 2023 after ansible upgrade; FIXME)
-  - 'warning[outdated-tag]'
 exclude_paths:
   # Generated files
   - tests/files/custom_cni/cilium.yaml
diff --git a/contrib/azurerm/roles/generate-inventory/tasks/main.yml b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
index 6176a34e30101a55155453a006ab822cd3549a6d..3eb121aa0a77d9295cdbf21a469345b626862eb1 100644
--- a/contrib/azurerm/roles/generate-inventory/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 
-- name: Query Azure VMs  # noqa 301
+- name: Query Azure VMs
   command: azure vm list-ip-address --json {{ azure_resource_group }}
   register: vm_list_cmd
 
diff --git a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
index 4c80c9a546a1e7768e0bdea3a609fae86fef85e4..c628154a038b32ff7b27d2dcce4043fc3dd72f30 100644
--- a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
@@ -1,14 +1,14 @@
 ---
 
-- name: Query Azure VMs IPs  # noqa 301
+- name: Query Azure VMs IPs
   command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
   register: vm_ip_list_cmd
 
-- name: Query Azure VMs Roles  # noqa 301
+- name: Query Azure VMs Roles
   command: az vm list -o json --resource-group {{ azure_resource_group }}
   register: vm_list_cmd
 
-- name: Query Azure Load Balancer Public IP  # noqa 301
+- name: Query Azure Load Balancer Public IP
   command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
   register: lb_pubip_cmd
 
diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml
index 2541a9319d1f33fbb584fbc8195de8b9fbaa52bc..205f77894ff5069568096376080fbf895dbe7e62 100644
--- a/contrib/dind/roles/dind-host/tasks/main.yaml
+++ b/contrib/dind/roles/dind-host/tasks/main.yaml
@@ -69,7 +69,7 @@
 
 # Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
 # handle manually
-- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)  # noqa 301
+- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
   raw: |
     echo {{ item | hash('sha1') }} > /etc/machine-id.new
     mv -b /etc/machine-id.new /etc/machine-id
@@ -79,7 +79,6 @@
   with_items: "{{ containers.results }}"
 
 - name: Early hack image install to adapt for DIND
-  # noqa 302 - this task uses the raw module intentionally
   raw: |
     rm -fv /usr/bin/udevadm /usr/sbin/udevadm
   delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml
index 2865b1004104f003a227acef9ec23136260b9810..da7a4d8decc6a7c768f28460dccae4928f049387 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml
@@ -7,7 +7,7 @@
   register: glusterfs_ppa_added
   when: glusterfs_ppa_use
 
-- name: Ensure GlusterFS client will reinstall if the PPA was just added.  # noqa 503
+- name: Ensure GlusterFS client will reinstall if the PPA was just added.  # noqa no-handler
   apt:
     name: "{{ item }}"
     state: absent
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
index db82d5f11c06bbf9430da8bec0400aa86c99c666..1146188aaf65351c189cde55e69f375e160e1ba2 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
@@ -28,7 +28,7 @@
     name: "{{ gluster_volume_node_mount_dir }}"
     src: "{{ disk_volume_device_1 }}"
     fstype: xfs
-    state: mounted"
+    state: mounted
 
 # Setup/install tasks.
 - include_tasks: setup-RedHat.yml
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml
index 855fe36bf5fbdb9d1a031afa6f3de4a10d0baa4d..104735903eec01361225be07b41e3a4efbc6d06a 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml
@@ -7,7 +7,7 @@
   register: glusterfs_ppa_added
   when: glusterfs_ppa_use
 
-- name: Ensure GlusterFS will reinstall if the PPA was just added.  # noqa 503
+- name: Ensure GlusterFS will reinstall if the PPA was just added.  # noqa no-handler
   apt:
     name: "{{ item }}"
     state: absent
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml
index e6b16e54a10537751e2a44739a8b1a6e9480399b..0ffd6f469f2e5fbbcbef40fe45e0aa8dc7577144 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml
@@ -6,7 +6,7 @@
 - name: "Delete bootstrap Heketi."
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
   when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
-- name: "Ensure there is nothing left over."  # noqa 301
+- name: "Ensure there is nothing left over."
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
index 4c6dc130c15c8d6c59696a997ece9a780ee4c5de..e623576d1be3fe012d2a4b60c1ffd5f6a21616b9 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
@@ -14,7 +14,7 @@
 - name: "Copy topology configuration into container."
   changed_when: false
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology."  # noqa 503
+- name: "Load heketi topology."  # noqa no-handler
   when: "render.changed"
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
   register: "load_heketi"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml
index dc93d782877d4f58d97abf7c3d06e660f50624af..14ab97793991d5c7f2d39d693b89dde66a9636ea 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml
@@ -18,7 +18,7 @@
 - name: "Provision database volume."
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
   when: "heketi_database_volume_exists is undefined"
-- name: "Copy configuration from pod."  # noqa 301
+- name: "Copy configuration from pod."
   become: true
   command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
 - name: "Get heketi volume ids."
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
index f20af1fb939203941cbffedca4b81e26fe887978..f5f8e6a94728d92b29d82611c5f09585f60abb43 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
@@ -11,10 +11,10 @@
     src: "topology.json.j2"
     dest: "{{ kube_config_dir }}/topology.json"
     mode: 0644
-- name: "Copy topology configuration into container."  # noqa 503
+- name: "Copy topology configuration into container."  # noqa no-handler
   when: "rendering.changed"
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology."  # noqa 503
+- name: "Load heketi topology."  # noqa no-handler
   when: "rendering.changed"
   command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
 - name: "Get heketi topology."
diff --git a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
index ae98bd8c2543e1ee3d8940540cef97e104e1e51f..f3ca033200a42f3d2e3fe741a06d3a5e41441610 100644
--- a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
+++ b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
@@ -22,7 +22,7 @@
   ignore_errors: true   # noqa ignore-errors
   changed_when: false
 
-- name: "Remove volume groups."  # noqa 301
+- name: "Remove volume groups."
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH / CentOS conservative path management
   become: true
@@ -30,7 +30,7 @@
   with_items: "{{ volume_groups.stdout_lines }}"
   loop_control: { loop_var: "volume_group" }
 
-- name: "Remove physical volume from cluster disks."  # noqa 301
+- name: "Remove physical volume from cluster disks."
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH / CentOS conservative path management
   become: true
diff --git a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
index 608b25de6ed042e69b156f07d826f368646f28fd..5b3553bf472c08b566b31de8c7bb5331dc8470fb 100644
--- a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
+++ b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
@@ -1,43 +1,43 @@
 ---
-- name: Remove storage class.  # noqa 301
+- name: Remove storage class.
   command: "{{ bin_dir }}/kubectl delete storageclass gluster"
   ignore_errors: true  # noqa ignore-errors
-- name: Tear down heketi.  # noqa 301
+- name: Tear down heketi.
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
   ignore_errors: true  # noqa ignore-errors
-- name: Tear down heketi.  # noqa 301
+- name: Tear down heketi.
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
   ignore_errors: true  # noqa ignore-errors
 - name: Tear down bootstrap.
   include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
-- name: Ensure there is nothing left over.  # noqa 301
+- name: Ensure there is nothing left over.
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: Ensure there is nothing left over.  # noqa 301
+- name: Ensure there is nothing left over.
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: Tear down glusterfs.  # noqa 301
+- name: Tear down glusterfs.
   command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
   ignore_errors: true  # noqa ignore-errors
-- name: Remove heketi storage service.  # noqa 301
+- name: Remove heketi storage service.
   command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
   ignore_errors: true  # noqa ignore-errors
-- name: Remove heketi gluster role binding  # noqa 301
+- name: Remove heketi gluster role binding
   command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
   ignore_errors: true  # noqa ignore-errors
-- name: Remove heketi config secret  # noqa 301
+- name: Remove heketi config secret
   command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
   ignore_errors: true  # noqa ignore-errors
-- name: Remove heketi db backup  # noqa 301
+- name: Remove heketi db backup
   command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
   ignore_errors: true  # noqa ignore-errors
-- name: Remove heketi service account  # noqa 301
+- name: Remove heketi service account
   command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
   ignore_errors: true  # noqa ignore-errors
 - name: Get secrets
diff --git a/extra_playbooks/migrate_openstack_provider.yml b/extra_playbooks/migrate_openstack_provider.yml
index 2ce86d5c5ebb43b2bdb970f32275bb50d0d741d6..14a6c2769e9a7806e80cad18579009fdaa7204c0 100644
--- a/extra_playbooks/migrate_openstack_provider.yml
+++ b/extra_playbooks/migrate_openstack_provider.yml
@@ -16,13 +16,13 @@
         src: get_cinder_pvs.sh
         dest: /tmp
         mode: u+rwx
-    - name: Get PVs provisioned by in-tree cloud provider  # noqa 301
+    - name: Get PVs provisioned by in-tree cloud provider
       command: /tmp/get_cinder_pvs.sh
       register: pvs
     - name: Remove get_cinder_pvs.sh
       file:
         path: /tmp/get_cinder_pvs.sh
         state: absent
-    - name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation  # noqa 301
+    - name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation
       command: "{{ bin_dir }}/kubectl annotate --overwrite pv {{ item }} pv.kubernetes.io/provisioned-by=cinder.csi.openstack.org"
       loop: "{{ pvs.stdout_lines | list }}"
diff --git a/roles/container-engine/containerd/tasks/reset.yml b/roles/container-engine/containerd/tasks/reset.yml
index 5c551b6d97c0d9910fdd6b8b423a4e57ffe7e588..1788e4ea9ee40aa19a8662f51d45cb5ad16ac6ad 100644
--- a/roles/container-engine/containerd/tasks/reset.yml
+++ b/roles/container-engine/containerd/tasks/reset.yml
@@ -22,7 +22,6 @@
     name: containerd
     daemon_reload: true
     enabled: false
-    masked: true
     state: stopped
   tags:
     - reset_containerd
diff --git a/roles/container-engine/cri-o/tasks/main.yaml b/roles/container-engine/cri-o/tasks/main.yaml
index bdd300b2ad4f5daf77afa8b80273282ac4b0f51b..23cab0e8a46f6dd77702f23aa3ee254abe384b83 100644
--- a/roles/container-engine/cri-o/tasks/main.yaml
+++ b/roles/container-engine/cri-o/tasks/main.yaml
@@ -196,7 +196,7 @@
   register: service_start
 
 - name: cri-o | trigger service restart only when needed
-  service:  # noqa 503
+  service:
     name: crio
     state: restarted
   when:
diff --git a/roles/container-engine/cri-o/tasks/reset.yml b/roles/container-engine/cri-o/tasks/reset.yml
index 460382766f5c45a54937011edd70de1ef730398e..0005a38a6a8ee95008b34f60a043144b009f6abc 100644
--- a/roles/container-engine/cri-o/tasks/reset.yml
+++ b/roles/container-engine/cri-o/tasks/reset.yml
@@ -63,7 +63,6 @@
     name: crio
     daemon_reload: true
     enabled: false
-    masked: true
     state: stopped
   tags:
     - reset_crio
diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml
index ae7b574d493214aa620694ef76ca7c8197679bbb..314430f27c426b41b687d1b17a64c9fcb9667b23 100644
--- a/roles/container-engine/docker/tasks/main.yml
+++ b/roles/container-engine/docker/tasks/main.yml
@@ -143,7 +143,7 @@
         state: started
       when: docker_task_result is not changed
   rescue:
-    - debug:  # noqa unnamed-task
+    - debug:  # noqa name[missing]
         msg: "Docker start failed. Try to remove our config"
     - name: remove kubespray generated config
       file:
diff --git a/roles/container-engine/docker/tasks/reset.yml b/roles/container-engine/docker/tasks/reset.yml
index 76d125b370722c9f30e06b502bd639d955f6b968..fb4f02c9b2c6e445cadc2e85a5083697580de22f 100644
--- a/roles/container-engine/docker/tasks/reset.yml
+++ b/roles/container-engine/docker/tasks/reset.yml
@@ -101,6 +101,6 @@
     - /etc/docker
   ignore_errors: true  # noqa ignore-errors
 
-- name: Docker | systemctl daemon-reload  # noqa 503
+- name: Docker | systemctl daemon-reload  # noqa no-handler
   systemd:
     daemon_reload: true
diff --git a/roles/container-engine/docker/tasks/set_facts_dns.yml b/roles/container-engine/docker/tasks/set_facts_dns.yml
index d800373002f398d73c149b4752a9cd96734fb6f6..3b7b67cf56552f0b3a0f34a3e4f28bfd8a3cef7f 100644
--- a/roles/container-engine/docker/tasks/set_facts_dns.yml
+++ b/roles/container-engine/docker/tasks/set_facts_dns.yml
@@ -26,7 +26,7 @@
   check_mode: no
 
 - name: check system search domains
-  # noqa 306 - if resolf.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
+  # noqa risky-shell-pipe - if resolv.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
   # Therefore -o pipefail is not applicable in this specific instance
   shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
   args:
diff --git a/roles/container-engine/docker/tasks/systemd.yml b/roles/container-engine/docker/tasks/systemd.yml
index 0c040fee78fb48a22689223767ed095d1ad22306..7deff7752c26164724ff77cef4adb703ca473366 100644
--- a/roles/container-engine/docker/tasks/systemd.yml
+++ b/roles/container-engine/docker/tasks/systemd.yml
@@ -14,7 +14,7 @@
   when: http_proxy is defined or https_proxy is defined
 
 - name: get systemd version
-  # noqa 303 - systemctl is called intentionally here
+  # noqa command-instead-of-module - systemctl is called intentionally here
   shell: set -o pipefail && systemctl --version | head -n 1 | cut -d " " -f 2
   args:
     executable: /bin/bash
diff --git a/roles/download/tasks/check_pull_required.yml b/roles/download/tasks/check_pull_required.yml
index c2f9ead02adfb90585998f479e8fb9f957983083..449589b4ce967ddeedbde0ee56706df8b21ed47b 100644
--- a/roles/download/tasks/check_pull_required.yml
+++ b/roles/download/tasks/check_pull_required.yml
@@ -1,7 +1,7 @@
 ---
 # The image_info_command depends on the Container Runtime and will output something like the following:
 # nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
-- name: check_pull_required |  Generate a list of information about the images on a node  # noqa 305 image_info_command contains a pipe, therefore requiring shell
+- name: check_pull_required |  Generate a list of information about the images on a node  # noqa command-instead-of-shell - image_info_command contains a pipe, therefore requiring shell
   shell: "{{ image_info_command }}"
   register: docker_images
   changed_when: false
diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml
index e956b6ff2722fbafa436d3e522adf4fdfd078259..426b0080407c912897bdfca9ba54f144cbbce8b6 100644
--- a/roles/download/tasks/download_container.yml
+++ b/roles/download/tasks/download_container.yml
@@ -18,7 +18,7 @@
       when:
         - not download_always_pull
 
-    - debug:  # noqa unnamed-task
+    - debug:  # noqa name[missing]
         msg: "Pull {{ image_reponame }} required is: {{ pull_required }}"
 
     - name: download_container | Determine if image is in cache
@@ -68,7 +68,7 @@
         - not image_is_cached
 
     - name: download_container | Save and compress image
-      shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}"  # noqa 305 image_save_command_on_localhost contains a pipe, therefore requires shell
+      shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}"  # noqa command-instead-of-shell - image_save_command_on_localhost contains a pipe, therefore requires shell
       delegate_to: "{{ download_delegate }}"
       delegate_facts: no
       register: container_save_status
@@ -108,7 +108,7 @@
         - download_force_cache
 
     - name: download_container | Load image into the local container registry
-      shell: "{{ image_load_command }}"  # noqa 305 image_load_command uses pipes, therefore requires shell
+      shell: "{{ image_load_command }}"  # noqa command-instead-of-shell - image_load_command uses pipes, therefore requires shell
       register: container_load_status
       failed_when: container_load_status is failed
       when:
diff --git a/roles/download/tasks/prep_download.yml b/roles/download/tasks/prep_download.yml
index 9419f24aac9090fc4a3a449966ac5f2b82c7f333..587810d48159548427f668165a44bac7c1c1ecff 100644
--- a/roles/download/tasks/prep_download.yml
+++ b/roles/download/tasks/prep_download.yml
@@ -21,7 +21,7 @@
     - asserts
 
 - name: prep_download | On localhost, check if user has access to the container runtime without using sudo
-  shell: "{{ image_info_command_on_localhost }}"  # noqa 305 image_info_command_on_localhost contains pipe, therefore requires shell
+  shell: "{{ image_info_command_on_localhost }}"  # noqa command-instead-of-shell - image_info_command_on_localhost contains pipe, therefore requires shell
   delegate_to: localhost
   connection: local
   run_once: true
@@ -57,7 +57,7 @@
     - asserts
 
 - name: prep_download | Register docker images info
-  shell: "{{ image_info_command }}"  # noqa 305 image_info_command contains pipe therefore requires shell
+  shell: "{{ image_info_command }}"  # noqa command-instead-of-shell - image_info_command contains pipe therefore requires shell
   no_log: "{{ not (unsafe_show_logs|bool) }}"
   register: docker_images
   failed_when: false
diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml
index 4bdd225fb078d27014adb22b04dd35d4a3340ac0..cfd0a33b01022a0c6fb263d0a7683f0696c9ccdc 100644
--- a/roles/etcd/tasks/join_etcd-events_member.yml
+++ b/roles/etcd/tasks/join_etcd-events_member.yml
@@ -1,5 +1,5 @@
 ---
-- name: Join Member | Add member to etcd-events cluster  # noqa 301 305
+- name: Join Member | Add member to etcd-events cluster
   command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}"
   register: member_add_result
   until: member_add_result.rc == 0
diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml
index 6bc28f8610f4fdf41c54cda4f4ddaa46786d7c1d..1cc2abf4f7fde4b235a09a7cf151b4e051322bf2 100644
--- a/roles/etcd/tasks/join_etcd_member.yml
+++ b/roles/etcd/tasks/join_etcd_member.yml
@@ -1,5 +1,5 @@
 ---
-- name: Join Member | Add member to etcd cluster  # noqa 301 305
+- name: Join Member | Add member to etcd cluster
   command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}"
   register: member_add_result
   until: member_add_result.rc == 0 or 'Peer URLs already exists' in member_add_result.stderr
diff --git a/roles/etcd/tasks/upd_ca_trust.yml b/roles/etcd/tasks/upd_ca_trust.yml
index f806d3901a5d74e72b8b5c3dda18d1aa6645931e..22c5901e54b8cd28b1f4676fc901b0c627004338 100644
--- a/roles/etcd/tasks/upd_ca_trust.yml
+++ b/roles/etcd/tasks/upd_ca_trust.yml
@@ -24,14 +24,14 @@
     mode: 0640
   register: etcd_ca_cert
 
-- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar)  # noqa 503
+- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar)  # noqa no-handler
   command: update-ca-certificates
   when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse"]
 
-- name: Gen_certs | update ca-certificates (RedHat)  # noqa 503
+- name: Gen_certs | update ca-certificates (RedHat)  # noqa no-handler
   command: update-ca-trust extract
   when: etcd_ca_cert.changed and ansible_os_family == "RedHat"
 
-- name: Gen_certs | update ca-certificates (ClearLinux)  # noqa 503
+- name: Gen_certs | update ca-certificates (ClearLinux)  # noqa no-handler
   command: clrtrust add "{{ ca_cert_path }}"
   when: etcd_ca_cert.changed and ansible_os_family == "ClearLinux"
diff --git a/roles/helm-apps/tasks/main.yml b/roles/helm-apps/tasks/main.yml
index 2dc2485bd92627a3934d09cb0a680e5280eaab18..9515f167ccecf81d34b3ae33af0e05c8d16dfe19 100644
--- a/roles/helm-apps/tasks/main.yml
+++ b/roles/helm-apps/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: Add Helm repositories
-  kubernetes.core.helm_repository: "{{ helm_repository_defaults | combine(item) }}"
+  kubernetes.core.helm_repository: "{{ helm_repository_defaults | combine(item) }}"  # noqa args[module]
   loop: "{{ repositories }}"
 
 - name: Update Helm repositories
@@ -15,5 +15,5 @@
     - helm_update
 
 - name: Install Helm Applications
-  kubernetes.core.helm: "{{ helm_defaults | combine(release_common_opts, item) }}"
+  kubernetes.core.helm: "{{ helm_defaults | combine(release_common_opts, item) }}"  # noqa args[module]
   loop: "{{ releases }}"
diff --git a/roles/kubernetes-apps/krew/tasks/krew.yml b/roles/kubernetes-apps/krew/tasks/krew.yml
index bbc4dbaadf6a1329d79b647b12fcfecff4567a7e..a8b52010b59fb90d0a37f855cd0ff41a2abedb00 100644
--- a/roles/kubernetes-apps/krew/tasks/krew.yml
+++ b/roles/kubernetes-apps/krew/tasks/krew.yml
@@ -16,7 +16,7 @@
     dest: "{{ local_release_dir }}/krew.yml"
     mode: 0644
 
-- name: Krew | Install krew  # noqa 301 305
+- name: Krew | Install krew  # noqa command-instead-of-shell
   shell: "{{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }} install --archive={{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz --manifest={{ local_release_dir }}/krew.yml"
   environment:
     KREW_ROOT: "{{ krew_root_dir }}"
diff --git a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
index 25f9a7132652813a5b39580998664eca66bfe908..70cd7adc3f4e0cf108ea469fc76f5419c047014e 100644
--- a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
@@ -12,7 +12,7 @@
   run_once: true
 
 - name: kube-router | Wait for kube-router pods to be ready
-  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"   # noqa 601 ignore-errors
+  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"   # noqa ignore-errors
   register: pods_not_ready
   until: pods_not_ready.stdout.find("kube-router")==-1
   retries: 30
diff --git a/roles/kubernetes/control-plane/tasks/main.yml b/roles/kubernetes/control-plane/tasks/main.yml
index 344860e8ebb64a11bd8d6a5c051a2e87e3321f0b..4df4783431ee3cef2213a5b92e671bda72015d31 100644
--- a/roles/kubernetes/control-plane/tasks/main.yml
+++ b/roles/kubernetes/control-plane/tasks/main.yml
@@ -100,5 +100,5 @@
     name: k8s-certs-renew.timer
     enabled: yes
     state: started
-    daemon-reload: "{{ k8s_certs_units is changed }}"
+    daemon_reload: "{{ k8s_certs_units is changed }}"
   when: auto_renew_certificates
diff --git a/roles/kubernetes/control-plane/tasks/pre-upgrade.yml b/roles/kubernetes/control-plane/tasks/pre-upgrade.yml
index 27c04ea95d1a38d92de7ef06a114425ecc11137d..4c33624e46f0775ca56ba560ecee39cf82552530 100644
--- a/roles/kubernetes/control-plane/tasks/pre-upgrade.yml
+++ b/roles/kubernetes/control-plane/tasks/pre-upgrade.yml
@@ -8,7 +8,7 @@
   register: kube_apiserver_manifest_replaced
   when: etcd_secret_changed|default(false)
 
-- name: "Pre-upgrade | Delete master containers forcefully"  # noqa 503
+- name: "Pre-upgrade | Delete master containers forcefully"  # noqa no-handler
   shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
   args:
     executable: /bin/bash
diff --git a/roles/kubernetes/node-label/tasks/main.yml b/roles/kubernetes/node-label/tasks/main.yml
index f91e7f459fbb14a2870279e607b1da05890f8375..0904ffca7219080eb70b0c8c07d6595d24741c09 100644
--- a/roles/kubernetes/node-label/tasks/main.yml
+++ b/roles/kubernetes/node-label/tasks/main.yml
@@ -35,9 +35,9 @@
     - node_labels is defined
     - node_labels is mapping
 
-- debug:  # noqa unnamed-task
+- debug:  # noqa name[missing]
     var: role_node_labels
-- debug:  # noqa unnamed-task
+- debug:  # noqa name[missing]
     var: inventory_node_labels
 
 - name: Set label to node
diff --git a/roles/kubernetes/preinstall/tasks/0020-set_facts.yml b/roles/kubernetes/preinstall/tasks/0020-set_facts.yml
index ca430cac45bd35a5dbbebd8eaff43e4111dc87f6..d8638ff2b0f4c4a453f4f516dd23d65d9c449383 100644
--- a/roles/kubernetes/preinstall/tasks/0020-set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/0020-set_facts.yml
@@ -93,7 +93,7 @@
     - not (disable_host_nameservers | default(false))
 
 - name: NetworkManager | Check if host has NetworkManager
-  # noqa 303 Should we use service_facts for this?
+  # noqa command-instead-of-module - Should we use service_facts for this?
   command: systemctl is-active --quiet NetworkManager.service
   register: networkmanager_enabled
   failed_when: false
@@ -101,7 +101,7 @@
   check_mode: false
 
 - name: check systemd-resolved
-  # noqa 303 Should we use service_facts for this?
+  # noqa command-instead-of-module - Should we use service_facts for this?
   command: systemctl is-active systemd-resolved
   register: systemd_resolved_enabled
   failed_when: false
diff --git a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
index 598399b9bc42b4c2885f4c391a6be307acdbae4e..6a2203ccad8e2fa93c15692f87dc684b9c9ab22a 100644
--- a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
+++ b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
@@ -33,12 +33,12 @@
   changed_when: False
   register: fs_type
 
-- name: run growpart  # noqa 503
+- name: run growpart  # noqa no-handler
   command: growpart {{ device }} {{ partition }}
   when: growpart_needed.changed
   environment:
     LC_ALL: C
 
-- name: run xfs_growfs  # noqa 503
+- name: run xfs_growfs  # noqa no-handler
   command: xfs_growfs {{ root_device }}
   when: growpart_needed.changed and 'XFS' in fs_type.stdout
diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml
index 61645526d94e1f0fd0d65b746efc27c8d5a4cc70..471518d9f222e865407b3a15f52f248bdc93d2ab 100644
--- a/roles/network_plugin/calico/rr/tasks/main.yml
+++ b/roles/network_plugin/calico/rr/tasks/main.yml
@@ -5,7 +5,7 @@
 - name: Calico-rr | Configuring node tasks
   include_tasks: update-node.yml
 
-- name: Calico-rr | Set label for route reflector  # noqa 301
+- name: Calico-rr | Set label for route reflector
   command: >-
     {{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }}
     'i-am-a-route-reflector=true' --overwrite
diff --git a/roles/network_plugin/calico/rr/tasks/update-node.yml b/roles/network_plugin/calico/rr/tasks/update-node.yml
index 7070076b1775b873cdaea06709dc1b4b78896637..930429139ca7154d4330cff29dffbf5918541c07 100644
--- a/roles/network_plugin/calico/rr/tasks/update-node.yml
+++ b/roles/network_plugin/calico/rr/tasks/update-node.yml
@@ -6,7 +6,7 @@
     set_fact:
       retry_count: "{{ 0 if retry_count is undefined else retry_count|int + 1 }}"
 
-  - name: Calico | Set label for route reflector  # noqa 301 305
+  - name: Calico | Set label for route reflector  # noqa command-instead-of-shell
     shell: "{{ bin_dir }}/calicoctl.sh label node  {{ inventory_hostname }} calico-rr-id={{ calico_rr_id }} --overwrite"
     changed_when: false
     register: calico_rr_id_label
@@ -29,7 +29,7 @@
         {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp':
         { 'routeReflectorClusterID': cluster_id }}}, recursive=True) }}
 
-  - name: Calico-rr | Configure route reflector  # noqa 301 305
+  - name: Calico-rr | Configure route reflector  # noqa command-instead-of-shell
     shell: "{{ bin_dir }}/calicoctl.sh replace -f-"
     args:
       stdin: "{{ calico_rr_node_patched | to_json }}"
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml
index dd6b71ae09a51d844cdbb1ba92d5008691949098..7d509f90ee5cbe21ff856268a68a1572fbd0dd0d 100644
--- a/roles/network_plugin/calico/tasks/install.yml
+++ b/roles/network_plugin/calico/tasks/install.yml
@@ -72,7 +72,7 @@
   when: calico_datastore == "etcd"
 
 - name: Calico | Check if calico network pool has already been configured
-  # noqa 306 - grep will exit 1 if no match found
+  # noqa risky-shell-pipe - grep will exit 1 if no match found
   shell: >
     {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l
   args:
@@ -95,7 +95,7 @@
     - calico_pool_cidr is defined
 
 - name: Calico | Check if calico IPv6 network pool has already been configured
-  # noqa 306 - grep will exit 1 if no match found
+  # noqa risky-shell-pipe - grep will exit 1 if no match found
   shell: >
     {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}" | wc -l
   args:
diff --git a/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml b/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml
index 863e132414af1d53ea96f5fdae548d87ca12f80f..5e6010ced38edd98b5dfd480b89ae1740ace73c0 100644
--- a/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml
+++ b/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml
@@ -1,6 +1,6 @@
 ---
-- name: Calico | Set label for groups nodes  # noqa 301 305
-  shell: "{{ bin_dir }}/calicoctl.sh label node  {{ inventory_hostname }} calico-group-id={{ calico_group_id }} --overwrite"
+- name: Calico | Set label for groups nodes
+  command: "{{ bin_dir }}/calicoctl.sh label node  {{ inventory_hostname }} calico-group-id={{ calico_group_id }} --overwrite"
   changed_when: false
   register: calico_group_id_label
   until: calico_group_id_label is succeeded
diff --git a/roles/network_plugin/cilium/tasks/apply.yml b/roles/network_plugin/cilium/tasks/apply.yml
index b977c21772b2a918ae0a314c6713ec250b721cc0..75868ba17a94273da89a919aa2fa94e3554122f2 100644
--- a/roles/network_plugin/cilium/tasks/apply.yml
+++ b/roles/network_plugin/cilium/tasks/apply.yml
@@ -11,7 +11,7 @@
   when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
 
 - name: Cilium | Wait for pods to run
-  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601
+  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa literal-compare
   register: pods_not_ready
   until: pods_not_ready.stdout.find("cilium")==-1
   retries: "{{ cilium_rolling_restart_wait_retries_count | int }}"
diff --git a/roles/recover_control_plane/etcd/tasks/main.yml b/roles/recover_control_plane/etcd/tasks/main.yml
index 1944f50d274502677c0216d823b71ecc2d134f2a..8c6deda5cfbfb73587d52d44096c3980beebf10a 100644
--- a/roles/recover_control_plane/etcd/tasks/main.yml
+++ b/roles/recover_control_plane/etcd/tasks/main.yml
@@ -43,7 +43,6 @@
     - has_quorum
 
 - name: Delete old certificates
-  # noqa 302 ignore-error - rm is ok here for now
   shell: "rm {{ etcd_cert_dir }}/*{{ item }}*"
   with_items: "{{ groups['broken_etcd'] }}"
   register: delete_old_cerificates
diff --git a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
index 86096fed911afb9ab52316710021d1d161d68f9e..388962875f15ef1745b498bfea5899a174e1b55d 100644
--- a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
+++ b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
@@ -26,7 +26,7 @@
     path: "{{ etcd_data_dir }}"
     state: absent
 
-- name: Restore etcd snapshot  # noqa 301 305
+- name: Restore etcd snapshot  # noqa command-instead-of-shell
   shell: "{{ bin_dir }}/etcdctl snapshot restore /tmp/snapshot.db --name {{ etcd_member_name }} --initial-cluster {{ etcd_member_name }}={{ etcd_peer_url }} --initial-cluster-token k8s_etcd --initial-advertise-peer-urls {{ etcd_peer_url }} --data-dir {{ etcd_data_dir }}"
   environment:
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
index b45e809f8d5e794baf2e2312510bcdc0f41e7919..31d959c7c1fa6044ae7c37f091aa4bee5135e7ad 100644
--- a/roles/remove-node/pre-remove/tasks/main.yml
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -9,7 +9,7 @@
   changed_when: false
   run_once: true
 
-- name: remove-node | Drain node except daemonsets resource  # noqa 301
+- name: remove-node | Drain node except daemonsets resource
   command: >-
     {{ kubectl }} drain
       --force
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index f6394c366444efacb7bdb0c895c1459ac4097cb5..2d06b5c438bcdc0d342ac5f3d29d0c62792ef23d 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -38,7 +38,7 @@
   tags:
     - docker
 
-- name: reset | systemctl daemon-reload  # noqa 503
+- name: reset | systemctl daemon-reload  # noqa no-handler
   systemd:
     daemon_reload: true
   when: services_removed.changed
@@ -174,7 +174,7 @@
       tags:
         - services
 
-- name: reset | gather mounted kubelet dirs  # noqa 301
+- name: reset | gather mounted kubelet dirs
   shell: set -o pipefail && mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
   args:
     executable: /bin/bash
@@ -185,7 +185,7 @@
   tags:
     - mounts
 
-- name: reset | unmount kubelet dirs  # noqa 301
+- name: reset | unmount kubelet dirs
   command: umount -f {{ item }}
   with_items: "{{ mounted_dirs.stdout_lines }}"
   register: umount_dir
diff --git a/roles/win_nodes/kubernetes_patch/tasks/main.yml b/roles/win_nodes/kubernetes_patch/tasks/main.yml
index a6c70edbda4a875cc5776c40631e4c282a9cb29f..6a53862c18d406697caefe347ff131df5dce2bbf 100644
--- a/roles/win_nodes/kubernetes_patch/tasks/main.yml
+++ b/roles/win_nodes/kubernetes_patch/tasks/main.yml
@@ -29,11 +29,11 @@
       register: patch_kube_proxy_state
       when: current_kube_proxy_state.stdout | trim | lower != "linux"
 
-    - debug:  # noqa unnamed-task
+    - debug:  # noqa name[missing]
         msg: "{{ patch_kube_proxy_state.stdout_lines }}"
       when: patch_kube_proxy_state is not skipped
 
-    - debug:  # noqa unnamed-task
+    - debug:  # noqa name[missing]
         msg: "{{ patch_kube_proxy_state.stderr_lines }}"
       when: patch_kube_proxy_state is not skipped
   tags: init
diff --git a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
index 832f9dd7f108486221788e68d3f3e8d92eaf908b..5ac47b00c18c916bd26e2c331e8ef12271a04f19 100644
--- a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
+++ b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
@@ -32,7 +32,7 @@
   when:
     - item.value.converted|bool
 
-- name: Resize images  # noqa 301
+- name: Resize images
   command: qemu-img resize {{ images_dir }}/{{ item.key }}.qcow2 +8G
   loop: "{{ images|dict2items }}"
 
@@ -43,16 +43,16 @@
     dest: "{{ images_dir }}/Dockerfile"
     mode: 0644
 
-- name: Create docker images for each OS  # noqa 301
+- name: Create docker images for each OS
   command: docker build -t {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
   loop: "{{ images|dict2items }}"
 
-- name: docker login  # noqa 301
+- name: docker login
   command: docker login -u="{{ docker_user }}" -p="{{ docker_password }}" "{{ docker_host }}"
 
-- name: docker push image  # noqa 301
+- name: docker push image
   command: docker push {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }}
   loop: "{{ images|dict2items }}"
 
-- name: docker logout  # noqa 301
+- name: docker logout
   command: docker logout -u="{{ docker_user }}" "{{ docker_host }}"
diff --git a/tests/cloud_playbooks/create-aws.yml b/tests/cloud_playbooks/create-aws.yml
index 453c1139d7ab35b08c6ace9fa87e19082fa5d2a8..a4628f4244fb56684bc0c052eb5e97c98a51eb4a 100644
--- a/tests/cloud_playbooks/create-aws.yml
+++ b/tests/cloud_playbooks/create-aws.yml
@@ -20,6 +20,6 @@
 
   - name: Template the inventory
     template:
-      src: ../templates/inventory-aws.j2  # noqa 404 CI inventory templates are not in role_path
+      src: ../templates/inventory-aws.j2  # noqa no-relative-paths - CI inventory templates are not in role_path
       dest: "{{ inventory_path }}"
       mode: 0644
diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml
index f95cbe5162b473d3984f1d37513be06f1c161f18..b4d2125ac5e9fcdba395de346f6fce1299df0299 100644
--- a/tests/cloud_playbooks/create-do.yml
+++ b/tests/cloud_playbooks/create-do.yml
@@ -86,7 +86,7 @@
 
     - name: Template the inventory
       template:
-        src: ../templates/inventory-do.j2  # noqa 404 CI templates are not in role_path
+        src: ../templates/inventory-do.j2  # noqa no-relative-paths - CI templates are not in role_path
         dest: "{{ inventory_path }}"
         mode: 0644
       when: state == 'present'
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index dae55a2c5dba84fe2a103632b2a4430aeef6437c..ccb4bce1da045fa0b4236a905566533b361b8146 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -28,7 +28,7 @@
           {%- endif -%}
 
     - name: Create gce instances
-      google.cloud.gcp_compute_instance:
+      google.cloud.gcp_compute_instance:  # noqa args[module] - Probably doesn't work
         instance_names: "{{ instance_names }}"
         machine_type: "{{ cloud_machine_type }}"
         image: "{{ cloud_image | default(omit) }}"
@@ -51,7 +51,7 @@
         groupname: "waitfor_hosts"
       with_items: '{{ gce.instance_data }}'
 
-    - name: Template the inventory  # noqa 404 CI inventory templates are not in role_path
+    - name: Template the inventory  # noqa no-relative-paths - CI inventory templates are not in role_path
       template:
         src: ../templates/inventory-gce.j2
         dest: "{{ inventory_path }}"
@@ -64,7 +64,7 @@
         mode: 0755
       when: mode in ['scale', 'separate-scale', 'ha-scale']
 
-    - name: Template fake hosts group vars  # noqa 404 CI templates are not in role_path
+    - name: Template fake hosts group vars  # noqa no-relative-paths - CI templates are not in role_path
       template:
         src: ../templates/fake_hosts.yml.j2
         dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"
diff --git a/tests/cloud_playbooks/delete-gce.yml b/tests/cloud_playbooks/delete-gce.yml
index b88abea1c7a7ef0530ee2f374d1e0d9a2a26518b..4d118711b75ee774866bd879c6edf6a92bf42706 100644
--- a/tests/cloud_playbooks/delete-gce.yml
+++ b/tests/cloud_playbooks/delete-gce.yml
@@ -19,7 +19,7 @@
           k8s-{{ test_name }}-1,k8s-{{ test_name }}-2
           {%- endif -%}
 
-    - name: stop gce instances
+    - name: stop gce instances  # noqa args[module] - Probably doesn't work
       google.cloud.gcp_compute_instance:
         instance_names: "{{ instance_names }}"
         image: "{{ cloud_image | default(omit) }}"
@@ -33,7 +33,7 @@
       poll: 3
       register: gce
 
-    - name: delete gce instances
+    - name: delete gce instances  # noqa args[module] - Probably doesn't work
       google.cloud.gcp_compute_instance:
         instance_names: "{{ instance_names }}"
         image: "{{ cloud_image | default(omit) }}"
diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml
index 2f5c9d897172ddaa7291c6a39d42699d3962c18f..bf0ffd9e9df3969639ed77850962e7ed59daca11 100644
--- a/tests/cloud_playbooks/upload-logs-gcs.yml
+++ b/tests/cloud_playbooks/upload-logs-gcs.yml
@@ -56,7 +56,7 @@
       no_log: True
       failed_when: false
 
-    - name: Apply the lifecycle rules  # noqa 301
+    - name: Apply the lifecycle rules
       command: "{{ dir }}/google-cloud-sdk/bin/gsutil lifecycle set {{ dir }}/gcs_life.json gs://{{ test_name }}"
       changed_when: false
       environment:
@@ -77,5 +77,5 @@
       failed_when: false
       no_log: True
 
-    - debug:  # noqa unnamed-task
+    - debug:  # noqa name[missing]
         msg: "A public url https://storage.googleapis.com/{{ test_name }}/{{ file_name }}"
diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml
index a0a09a411aa577a6b4e362e85ca37343e0350685..961df9bc45fa17e66719ecfe47a7a4c9fda9f94b 100644
--- a/tests/testcases/010_check-apiserver.yml
+++ b/tests/testcases/010_check-apiserver.yml
@@ -12,7 +12,7 @@
     delay: 5
     until: apiserver_response is success
 
-  - debug:  # noqa unnamed-task
+  - debug:  # noqa name[missing]
       msg: "{{ apiserver_response.json }}"
 
   - name: Check API servers version
diff --git a/tests/testcases/015_check-nodes-ready.yml b/tests/testcases/015_check-nodes-ready.yml
index 1c3b977229d1411ae42548d7fb7139ab4f51a85b..bb2f2832337b4defd406832b0d323d5de9b1a82e 100644
--- a/tests/testcases/015_check-nodes-ready.yml
+++ b/tests/testcases/015_check-nodes-ready.yml
@@ -12,7 +12,7 @@
       bin_dir: "/usr/local/bin"
     when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
 
-  - import_role:  # noqa unnamed-task
+  - import_role:  # noqa name[missing]
       name: cluster-dump
 
   - name: Check kubectl output
@@ -21,7 +21,7 @@
     register: get_nodes
     no_log: true
 
-  - debug:  # noqa unnamed-task
+  - debug:  # noqa name[missing]
       msg: "{{ get_nodes.stdout.split('\n') }}"
 
   - name: Check that all nodes are running and ready
diff --git a/tests/testcases/020_check-pods-running.yml b/tests/testcases/020_check-pods-running.yml
index 46392d1d996aa03aee79e1f133b109d7a55a834d..d1dc17c2adefdd2bb1134ca79a6666c22bc7f64d 100644
--- a/tests/testcases/020_check-pods-running.yml
+++ b/tests/testcases/020_check-pods-running.yml
@@ -12,7 +12,7 @@
       bin_dir: "/usr/local/bin"
     when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
 
-  - import_role:  # noqa unnamed-task
+  - import_role:  # noqa name[missing]
       name: cluster-dump
 
   - name: Check kubectl output
@@ -21,7 +21,7 @@
     register: get_pods
     no_log: true
 
-  - debug:  # noqa unnamed-task
+  - debug:  # noqa name[missing]
       msg: "{{ get_pods.stdout.split('\n') }}"
 
   - name: Check that all pods are running and ready
@@ -44,6 +44,6 @@
     register: get_pods
     no_log: true
 
-  - debug:  # noqa unnamed-task
+  - debug:  # noqa name[missing]
       msg: "{{ get_pods.stdout.split('\n') }}"
     failed_when: not run_pods_log is success
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index c736ac730812adc4e81b9d04497e346908f99396..78a1021d60b1ddbebbac80cd82f8b0c90edf44c7 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -23,7 +23,7 @@
       register: get_csr
       changed_when: false
 
-    - debug:  # noqa unnamed-task
+    - debug:  # noqa name[missing]
         msg: "{{ get_csr.stdout.split('\n') }}"
 
     - name: Check there are csrs
@@ -63,7 +63,7 @@
       when: get_csr.stdout_lines | length > 0
       changed_when: certificate_approve.stdout
 
-    - debug:  # noqa unnamed-task
+    - debug:  # noqa name[missing]
         msg: "{{ certificate_approve.stdout.split('\n') }}"
 
     when:
@@ -114,7 +114,7 @@
     - agnhost1
     - agnhost2
 
-  - import_role:  # noqa unnamed-task
+  - import_role:  # noqa name[missing]
       name: cluster-dump
 
   - name: Check that all pods are running and ready
@@ -137,7 +137,7 @@
     register: pods
     no_log: true
 
-  - debug:  # noqa unnamed-task
+  - debug:  # noqa name[missing]
       msg: "{{ pods.stdout.split('\n') }}"
     failed_when: not run_pods_log is success
 
@@ -162,7 +162,7 @@
     register: get_pods
     no_log: true
 
-  - debug:  # noqa unnamed-task
+  - debug:  # noqa name[missing]
       msg: "{{ get_pods.stdout.split('\n') }}"
 
   - name: Set networking facts
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index 37cf85131e5e3b20aa0f605519e8c9bf1be94f2e..50a8136d29380663e020643654675398529e6391 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -26,7 +26,7 @@
         bin_dir: "/usr/local/bin"
       when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
 
-    - import_role:  # noqa unnamed-task
+    - import_role:  # noqa name[missing]
         name: cluster-dump
 
     - name: Wait for netchecker server
@@ -60,7 +60,7 @@
         - netchecker-agent-hostnet
       when: not nca_pod is success
 
-    - debug:  # noqa unnamed-task
+    - debug:  # noqa name[missing]
         var: nca_pod.stdout_lines
       when: inventory_hostname == groups['kube_control_plane'][0]
 
@@ -96,7 +96,7 @@
       when:
         - agents.content != '{}'
 
-    - debug:  # noqa unnamed-task
+    - debug:  # noqa name[missing]
         var: ncs_pod
       run_once: true
 
@@ -130,7 +130,7 @@
         - agents.content is defined
         - agents.content[0] == '{'
 
-    - debug:  # noqa unnamed-task
+    - debug:  # noqa name[missing]
         var: agents_check_result
       delegate_to: "{{ groups['kube_control_plane'][0] }}"
       run_once: true
@@ -147,7 +147,7 @@
         - connectivity_check.content is defined
         - connectivity_check.content[0] == '{'
 
-    - debug:  # noqa unnamed-task
+    - debug:  # noqa name[missing]
         var: connectivity_check_result
       delegate_to: "{{ groups['kube_control_plane'][0] }}"
       run_once: true