diff --git a/.ansible-lint b/.ansible-lint
index e1909e9666e553060a7747cc82eaafdfedb22add..048a89787a90c035ba05fec2a282d828aff94ae8 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -18,3 +18,13 @@ skip_list:
   # While it can be useful to have these metadata available, they are also available in the existing documentation.
   # (Disabled in May 2019)
   - '701'
+
+  # [role-name] "meta/main.yml" Role name role-name does not match ``^[a-z][a-z0-9_]+$`` pattern
+  # Meta roles in Kubespray don't need proper names
+  # (Disabled in June 2021)
+  - 'role-name'
+
+  # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
+  # In Kubespray we use variables that use camelCase to match their k8s counterparts
+  # (Disabled in June 2021)
+  - 'var-naming'
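
As an aside, a minimal sketch (values illustrative, not from this change) of the kind of `defaults/main.yml` entry that trips the `var-naming` rule skipped above — the camelCase keys are kept deliberately so they mirror their Kubernetes counterparts:

```yaml
# defaults/main.yml — illustrative only
# 'apiVersion' mirrors the k8s field name and therefore violates
# ansible-lint's snake_case variable naming standard.
apiVersion: v1
podSecurityPolicy: false
```
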
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 00278bb383f3a7a34e1bbc35b8eb8f9200ad6c40..e6aae01acc7223a9a95e6f5ea6381b8bf76d52a2 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -37,6 +37,7 @@ variables:
 before_script:
   - ./tests/scripts/rebase.sh
   - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
+  - python -m pip uninstall -y ansible
   - python -m pip install -r tests/requirements.txt
   - mkdir -p /.ssh
 
diff --git a/.gitlab-ci/lint.yml b/.gitlab-ci/lint.yml
index 8d4d41a7f2ca34e86738a0ea581c2e0467190195..34b6e220767602bf4b8b0379b7db5acd99390f97 100644
--- a/.gitlab-ci/lint.yml
+++ b/.gitlab-ci/lint.yml
@@ -53,6 +53,7 @@ tox-inventory-builder:
     - ./tests/scripts/rebase.sh
     - apt-get update && apt-get install -y python3-pip
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+    - python -m pip uninstall -y ansible
     - python -m pip install -r tests/requirements.txt
   script:
     - pip3 install tox
diff --git a/.gitlab-ci/vagrant.yml b/.gitlab-ci/vagrant.yml
index 445393973fa0ec7c58206f1b6dd0cc373d83f371..92cf7b7db8cdc0fa2b1d0337543f1a4605d7cf20 100644
--- a/.gitlab-ci/vagrant.yml
+++ b/.gitlab-ci/vagrant.yml
@@ -11,6 +11,7 @@ molecule_tests:
     - tests/scripts/rebase.sh
     - apt-get update && apt-get install -y python3-pip
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+    - python -m pip uninstall -y ansible
     - python -m pip install -r tests/requirements.txt
     - ./tests/scripts/vagrant_clean.sh
   script:
@@ -31,6 +32,7 @@ molecule_tests:
   before_script:
     - apt-get update && apt-get install -y python3-pip
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+    - python -m pip uninstall -y ansible
     - python -m pip install -r tests/requirements.txt
     - ./tests/scripts/vagrant_clean.sh
   script:
diff --git a/ansible_version.yml b/ansible_version.yml
index da19e96987a551ebb18a50b2329ef80817fd0db7..cc2bb4134fdddf5fc9a3ec41b7c5e4241cccc4e7 100644
--- a/ansible_version.yml
+++ b/ansible_version.yml
@@ -4,6 +4,7 @@
   become: no
   vars:
     minimal_ansible_version: 2.9.0
+    minimal_ansible_version_2_10: 2.10.11
     maximal_ansible_version: 2.11.0
     ansible_connection: local
   tasks:
@@ -16,6 +17,17 @@
       tags:
         - check
 
+    - name: "Check Ansible version > {{ minimal_ansible_version_2_10 }} when using ansible 2.10"
+      assert:
+        msg: "When using Ansible 2.10, the minimum supported version is {{ minimal_ansible_version_2_10 }}"
+        that:
+          - ansible_version.string is version(minimal_ansible_version_2_10, ">=")
+          - ansible_version.string is version(maximal_ansible_version, "<")
+      when:
+        - ansible_version.string is version('2.10.0', ">=")
+      tags:
+        - check
+
     - name: "Check that python netaddr is installed"
       assert:
         msg: "Python netaddr is not present"
diff --git a/contrib/azurerm/roles/generate-inventory/tasks/main.yml b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
index ccc5e219a7a80d8adfe2642a8543fe1a0e2d3581..6176a34e30101a55155453a006ab822cd3549a6d 100644
--- a/contrib/azurerm/roles/generate-inventory/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
@@ -12,3 +12,4 @@
   template:
     src: inventory.j2
     dest: "{{ playbook_dir }}/inventory"
+    mode: 0644
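
The many `mode:` additions throughout this change all follow the same pattern; a before/after sketch (paths illustrative) of the `risky-file-permissions` fix, which exists because tasks that omit an explicit mode create files whose permissions depend on the remote umask:

```yaml
# Before: flagged by ansible-lint risky-file-permissions
- name: Generate inventory
  template:
    src: inventory.j2
    dest: "{{ playbook_dir }}/inventory"

# After: permissions are explicit and reproducible
- name: Generate inventory
  template:
    src: inventory.j2
    dest: "{{ playbook_dir }}/inventory"
    mode: 0644
```
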
diff --git a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
index 6ba7d5a873375626e8691bb68bd9f4796a5e97b4..4c80c9a546a1e7768e0bdea3a609fae86fef85e4 100644
--- a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
@@ -22,8 +22,10 @@
   template:
     src: inventory.j2
     dest: "{{ playbook_dir }}/inventory"
+    mode: 0644
 
 - name: Generate Load Balancer variables
   template:
     src: loadbalancer_vars.j2
     dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
+    mode: 0644
diff --git a/contrib/azurerm/roles/generate-templates/tasks/main.yml b/contrib/azurerm/roles/generate-templates/tasks/main.yml
index 489250a98bf312010b3caed4b56568da88f7e911..294ee96fc86d2fa8c2dd8a7b7676b0907993933e 100644
--- a/contrib/azurerm/roles/generate-templates/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-templates/tasks/main.yml
@@ -8,11 +8,13 @@
     path: "{{ base_dir }}"
     state: directory
     recurse: true
+    mode: 0755
 
 - name: Store json files in base_dir
   template:
     src: "{{ item }}"
     dest: "{{ base_dir }}/{{ item }}"
+    mode: 0644
   with_items:
     - network.json
     - storage.json
diff --git a/contrib/dind/roles/dind-cluster/tasks/main.yaml b/contrib/dind/roles/dind-cluster/tasks/main.yaml
index 5b7c77e497c287fda5f48b5ff350e999f9a175fd..247a0a8e98e0dc1dbfbedd1ee0b636bae18d3d74 100644
--- a/contrib/dind/roles/dind-cluster/tasks/main.yaml
+++ b/contrib/dind/roles/dind-cluster/tasks/main.yaml
@@ -35,6 +35,7 @@
       path-exclude=/usr/share/doc/*
       path-include=/usr/share/doc/*/copyright
     dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
+    mode: 0644
   when:
     - ansible_os_family == 'Debian'
 
@@ -63,6 +64,7 @@
   copy:
     content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
     dest: "/etc/sudoers.d/{{ distro_user }}"
+    mode: 0640
 
 - name: Add my pubkey to "{{ distro_user }}" user authorized keys
   authorized_key:
diff --git a/contrib/kvm-setup/roles/kvm-setup/tasks/user.yml b/contrib/kvm-setup/roles/kvm-setup/tasks/user.yml
index f259c7f071b426be7df9681143d04ec611d68bad..c2d312302638befa9613216da0e76bc5cf6fe2e3 100644
--- a/contrib/kvm-setup/roles/kvm-setup/tasks/user.yml
+++ b/contrib/kvm-setup/roles/kvm-setup/tasks/user.yml
@@ -11,6 +11,7 @@
     state: directory
     owner: "{{ k8s_deployment_user }}"
     group: "{{ k8s_deployment_user }}"
+    mode: 0700
 
 - name: Configure sudo for deployment user
   copy:
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
index 11269a6e7ba1a7372599fc9ab90d25e5640c29e8..0a58598505d8e511fa544ce62782f45b12fc7d8c 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
@@ -82,6 +82,7 @@
   template:
     dest: "{{ gluster_mount_dir }}/.test-file.txt"
     src: test-file.txt
+    mode: 0644
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
 
 - name: Unmount glusterfs
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tests/test.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tests/test.yml
deleted file mode 100644
index 3646ff4200765586adfc344a3cc77ba77c00b080..0000000000000000000000000000000000000000
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: all
-
-  roles:
-    - role_under_test
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
index 93b473295a9634397855c5fa08943b39827ce24d..8d03ffc2fc1f12fd7f15242d79ff10ddceb7abe5 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
@@ -1,7 +1,10 @@
 ---
 - name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
   become: true
-  template: { src: "heketi-bootstrap.json.j2", dest: "{{ kube_config_dir }}/heketi-bootstrap.json" }
+  template:
+    src: "heketi-bootstrap.json.j2"
+    dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
+    mode: 0640
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
   kube:
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
index 07e86237cec15ba5d3c7d405a4ec0d47f28b12b4..4c6dc130c15c8d6c59696a997ece9a780ee4c5de 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
@@ -10,6 +10,7 @@
   template:
     src: "topology.json.j2"
     dest: "{{ kube_config_dir }}/topology.json"
+    mode: 0644
 - name: "Copy topology configuration into container."
   changed_when: false
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
index 5f00e28aa8145ec2afeb4f3380c069df6b2bfff9..3409cf95785254d79134880ccc2b5d3ec492e861 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
@@ -1,6 +1,9 @@
 ---
 - name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
-  template: { src: "glusterfs-daemonset.json.j2", dest: "{{ kube_config_dir }}/glusterfs-daemonset.json" }
+  template:
+    src: "glusterfs-daemonset.json.j2"
+    dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
+    mode: 0644
   become: true
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
@@ -27,7 +30,10 @@
   delay: 5
 
 - name: "Kubernetes Apps | Lay Down Heketi Service Account"
-  template: { src: "heketi-service-account.json.j2", dest: "{{ kube_config_dir }}/heketi-service-account.json" }
+  template:
+    src: "heketi-service-account.json.j2"
+    dest: "{{ kube_config_dir }}/heketi-service-account.json"
+    mode: 0644
   become: true
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Heketi Service Account"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
index 7b6d37d24aa60bbd6ededea0f0f16d0b72819187..9a6ce55b2560a3ac642248ec13843c584321d00f 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
@@ -4,6 +4,7 @@
   template:
     src: "heketi-deployment.json.j2"
     dest: "{{ kube_config_dir }}/heketi-deployment.json"
+    mode: 0644
   register: "rendering"
 
 - name: "Kubernetes Apps | Install and configure Heketi"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
index 3615f7c6d4329725031d0b7bf1bf14902995e989..3249c87b4835af4ff5a94e88662f1da4b5d26d1a 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
@@ -5,7 +5,7 @@
   changed_when: false
 
 - name: "Kubernetes Apps | Deploy cluster role binding."
-  when: "clusterrolebinding_state.stdout == \"\""
+  when: "clusterrolebinding_state.stdout | length > 0"
   command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
 
 - name: Get clusterrolebindings again
@@ -15,7 +15,7 @@
 
 - name: Make sure that clusterrolebindings are present now
   assert:
-    that: "clusterrolebinding_state.stdout != \"\""
+    that: "clusterrolebinding_state.stdout | length > 0"
     msg: "Cluster role binding is not present."
 
 - name: Get the heketi-config-secret secret
@@ -28,9 +28,10 @@
   template:
     src: "heketi.json.j2"
     dest: "{{ kube_config_dir }}/heketi.json"
+    mode: 0644
 
 - name: "Deploy Heketi config secret"
-  when: "secret_state.stdout == \"\""
+  when: "secret_state.stdout | length > 0"
   command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
 
 - name: Get the heketi-config-secret secret again
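
A note on the `length` conversions in this file: ansible-lint discourages literal `== ""` comparisons, and the equivalent Jinja2 idiom is `| length == 0` for "empty" and `| length > 0` for "non-empty". A minimal sketch with an assumed registered variable:

```yaml
# assumes 'secret_state' was registered from a prior kubectl lookup
- name: Deploy the secret only when the lookup returned nothing
  command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
  when: secret_state.stdout | length == 0
```
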
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storage.yml b/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
index 210930804a5c93d03e3ea4f0718bf5e8911ef143..055e179a34b6b1f2b568ff75cc0763a92c0e507a 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
@@ -2,7 +2,10 @@
 - name: "Kubernetes Apps | Lay Down Heketi Storage"
   become: true
   vars: { nodes: "{{ groups['heketi-node'] }}" }
-  template: { src: "heketi-storage.json.j2", dest: "{{ kube_config_dir }}/heketi-storage.json" }
+  template:
+    src: "heketi-storage.json.j2"
+    dest: "{{ kube_config_dir }}/heketi-storage.json"
+    mode: 0644
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Heketi Storage"
   kube:
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
index 5bf3e3c4d54be6532c3f0c889d6f57223b196d28..3380a612f3caf18d7f8446069346671cb598914b 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
@@ -16,6 +16,7 @@
   template:
     src: "storageclass.yml.j2"
     dest: "{{ kube_config_dir }}/storageclass.yml"
+    mode: 0644
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Storace Class"
   kube:
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
index 4430a55926a97982c750bd12b13f4bb4edb20aa0..f20af1fb939203941cbffedca4b81e26fe887978 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
@@ -10,6 +10,7 @@
   template:
     src: "topology.json.j2"
     dest: "{{ kube_config_dir }}/topology.json"
+    mode: 0644
 - name: "Copy topology configuration into container."  # noqa 503
   when: "rendering.changed"
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
diff --git a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
index 43a7b4916f91d625bb24e9f793ae2f2f75a2c3e1..ae98bd8c2543e1ee3d8940540cef97e104e1e51f 100644
--- a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
+++ b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
@@ -19,7 +19,7 @@
   become: true
   shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
   register: "volume_groups"
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   changed_when: false
 
 - name: "Remove volume groups."  # noqa 301
@@ -35,7 +35,7 @@
     PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH / CentOS conservative path management
   become: true
   command: "pvremove {{ disk_volume_device_1 }} --yes"
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
 
 - name: "Remove lvm utils (RedHat)"
   become: true
diff --git a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
index baf25fcb72056bdb85076bc273a1b9dd28c8e8ed..608b25de6ed042e69b156f07d826f368646f28fd 100644
--- a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
+++ b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
@@ -1,51 +1,51 @@
 ---
-- name: "Remove storage class."  # noqa 301
+- name: Remove storage class.  # noqa 301
   command: "{{ bin_dir }}/kubectl delete storageclass gluster"
-  ignore_errors: true
-- name: "Tear down heketi."  # noqa 301
+  ignore_errors: true  # noqa ignore-errors
+- name: Tear down heketi.  # noqa 301
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
-  ignore_errors: true
-- name: "Tear down heketi."  # noqa 301
+  ignore_errors: true  # noqa ignore-errors
+- name: Tear down heketi.  # noqa 301
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
-  ignore_errors: true
-- name: "Tear down bootstrap."
+  ignore_errors: true  # noqa ignore-errors
+- name: Tear down bootstrap.
   include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
-- name: "Ensure there is nothing left over."  # noqa 301
+- name: Ensure there is nothing left over.  # noqa 301
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: "Ensure there is nothing left over."  # noqa 301
+- name: Ensure there is nothing left over.  # noqa 301
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: "Tear down glusterfs."  # noqa 301
+- name: Tear down glusterfs.  # noqa 301
   command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
-  ignore_errors: true
-- name: "Remove heketi storage service."  # noqa 301
+  ignore_errors: true  # noqa ignore-errors
+- name: Remove heketi storage service.  # noqa 301
   command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
-  ignore_errors: true
-- name: "Remove heketi gluster role binding"  # noqa 301
+  ignore_errors: true  # noqa ignore-errors
+- name: Remove heketi gluster role binding  # noqa 301
   command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
-  ignore_errors: true
-- name: "Remove heketi config secret"  # noqa 301
+  ignore_errors: true  # noqa ignore-errors
+- name: Remove heketi config secret  # noqa 301
   command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
-  ignore_errors: true
-- name: "Remove heketi db backup"  # noqa 301
+  ignore_errors: true  # noqa ignore-errors
+- name: Remove heketi db backup  # noqa 301
   command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
-  ignore_errors: true
-- name: "Remove heketi service account"  # noqa 301
+  ignore_errors: true  # noqa ignore-errors
+- name: Remove heketi service account  # noqa 301
   command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
-  ignore_errors: true
-- name: "Get secrets"
+  ignore_errors: true  # noqa ignore-errors
+- name: Get secrets
   command: "{{ bin_dir }}/kubectl get secrets --output=\"json\""
   register: "secrets"
   changed_when: false
-- name: "Remove heketi storage secret"
+- name: Remove heketi storage secret
   vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
   command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
   when: "storage_query is defined"
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
diff --git a/docs/ansible.md b/docs/ansible.md
index d8ca5a65755ba07caf50009bd5734718c616df89..30f0e13f391931251aaef19c1a5c1e4247788519 100644
--- a/docs/ansible.md
+++ b/docs/ansible.md
@@ -187,3 +187,28 @@ For more information about Ansible and bastion hosts, read
 ## Mitogen
 
 You can use [mitogen](mitogen.md) to speed up kubespray.
+
+## Beyond ansible 2.9
+
+The Ansible project has decided, in order to ease its maintenance burden, to split into
+two projects, which are now joined under the Ansible umbrella.
+
+Ansible-base (the 2.10.x branch) contains just the ansible language implementation, while
+the ansible modules that were previously bundled into a single repository are now part of
+the ansible 3.x package. Please see [this blog post](https://blog.while-true-do.io/ansible-release-3-0-0/)
+that explains the need and the evolution plan in detail.
+
+**Note:** this change means that ansible virtual envs cannot be upgraded with `pip install -U`.
+You first need to uninstall your old ansible (pre 2.10) version and install the new one.
+
+```ShellSession
+pip uninstall ansible
+cd kubespray/
+pip install -U .
+```
+
+**Note:** some changes needed to support ansible 2.10+ are not backwards compatible with 2.9.
+Kubespray needs to evolve and keep pace with upstream ansible and will eventually be forced
+to drop 2.9 support. Kubespray CI uses only the ansible version specified in
+`requirements.txt`, and while `ansible_version.yml` may allow older versions to be used,
+these are not exercised in the CI and compatibility is not guaranteed.
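
If the clean reinstall described above is driven by Ansible itself rather than by hand, an equivalent sketch with the stock `pip` module (illustrative; it mirrors the `pip uninstall` / `pip install -r` steps the CI runs) would be:

```yaml
- hosts: localhost
  tasks:
    - name: Remove the pre-2.10 ansible package first
      pip:
        name: ansible
        state: absent

    - name: Install the pinned kubespray requirements
      pip:
        requirements: "{{ playbook_dir }}/requirements.txt"
```
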
diff --git a/mitogen.yml b/mitogen.yml
index 7e8686a447eff13e4b994e1cd25e37163d4faf1b..b39075f139dc13e846e610d2a4e46fb42b0bbfca 100644
--- a/mitogen.yml
+++ b/mitogen.yml
@@ -5,7 +5,7 @@
 - hosts: localhost
   strategy: linear
   vars:
-    mitogen_version: 0.2.9
+    mitogen_version: 0.3.0rc1
     mitogen_url: https://github.com/dw/mitogen/archive/v{{ mitogen_version }}.tar.gz
     ansible_connection: local
   tasks:
@@ -13,6 +13,7 @@
       file:
         path: "{{ item }}"
         state: directory
+        mode: 0755
       become: false
       loop:
         - "{{ playbook_dir }}/plugins/mitogen"
@@ -40,3 +41,4 @@
         section: defaults
         option: strategy
         value: mitogen_linear
+        mode: 0644
diff --git a/recover-control-plane.yml b/recover-control-plane.yml
index 03d573d3bb0f0f37491f78bb57c22960daeec388..4d08f3a9024ffb0bad0ed184b4c2984f28a75710 100644
--- a/recover-control-plane.yml
+++ b/recover-control-plane.yml
@@ -12,13 +12,13 @@
     - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
 
-- hosts: "{{ groups['etcd'] | first }}"
+- hosts: etcd[0]
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
     - { role: recover_control_plane/etcd }
 
-- hosts: "{{ groups['kube_control_plane'] | first }}"
+- hosts: kube_control_plane[0]
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
@@ -26,7 +26,7 @@
 
 - include: cluster.yml
 
-- hosts: "{{ groups['kube_control_plane'] }}"
+- hosts: kube_control_plane
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
diff --git a/requirements.txt b/requirements.txt
index e38a00257ad7302218e1fbe20431ae0393bb1a63..09669bdb8873ea8834179924cbc6347061bce915 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,5 @@
-ansible==2.9.20
+ansible==3.4.0
+ansible-base==2.10.11
 cryptography==2.8
 jinja2==2.11.3
 netaddr==0.7.19
diff --git a/roles/bastion-ssh-config/tasks/main.yml b/roles/bastion-ssh-config/tasks/main.yml
index c6158dbce7ccb5e18a6db582592cc217485672dc..d638e539e4f603919afd4600d2698d2357f85171 100644
--- a/roles/bastion-ssh-config/tasks/main.yml
+++ b/roles/bastion-ssh-config/tasks/main.yml
@@ -19,3 +19,4 @@
   template:
     src: ssh-bastion.conf
     dest: "{{ playbook_dir }}/ssh-bastion.conf"
+    mode: 0640
diff --git a/roles/bootstrap-os/tasks/bootstrap-centos.yml b/roles/bootstrap-os/tasks/bootstrap-centos.yml
index f6a57b9d25fd9a004b8fdab82b30a3e92546e422..a0f4f588265a4e0bd28d0db83727632faaeea12b 100644
--- a/roles/bootstrap-os/tasks/bootstrap-centos.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-centos.yml
@@ -12,6 +12,7 @@
     value: "{{ http_proxy | default(omit) }}"
     state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
     no_extra_spaces: true
+    mode: 0644
   become: true
   when: not skip_http_proxy_on_os_packages
 
@@ -32,6 +33,7 @@
     section: "{{ item }}"
     option: enabled
     value: "1"
+    mode: 0644
   with_items:
     - ol7_latest
     - ol7_addons
@@ -56,6 +58,7 @@
     section: "ol{{ ansible_distribution_major_version }}_addons"
     option: "{{ item.option }}"
     value: "{{ item.value }}"
+    mode: 0644
   with_items:
     - { option: "name", value: "ol{{ ansible_distribution_major_version }}_addons" }
     - { option: "enabled", value: "1" }
diff --git a/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml b/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml
index e999d05064b9d3952cd12720fc3dba071716a919..1a222f6644b29f4462512887ac60ddcd2aa5b89b 100644
--- a/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml
@@ -11,7 +11,7 @@
 - name: Remove podman network cni
   raw: "podman network rm podman"
   become: true
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when: need_bootstrap.rc != 0
 
 - name: Clean up possible pending packages on fedora coreos
@@ -43,7 +43,7 @@
 - name: Reboot immediately for updated ostree, please run playbook again if failed first time.
   raw: "nohup bash -c 'sleep 5s && shutdown -r now'"
   become: true
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   ignore_unreachable: yes
   when: need_bootstrap.rc != 0
 
diff --git a/roles/bootstrap-os/tasks/bootstrap-fedora.yml b/roles/bootstrap-os/tasks/bootstrap-fedora.yml
index cfdd76e3a62c6c97a30949d6c73e76ebef41d1c5..1613173156e1784cfed18e813bb60cc855d9ccc9 100644
--- a/roles/bootstrap-os/tasks/bootstrap-fedora.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-fedora.yml
@@ -17,6 +17,7 @@
     value: "{{ http_proxy | default(omit) }}"
     state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
     no_extra_spaces: true
+    mode: 0644
   become: true
   when: not skip_http_proxy_on_os_packages
 
diff --git a/roles/bootstrap-os/tasks/bootstrap-opensuse.yml b/roles/bootstrap-os/tasks/bootstrap-opensuse.yml
index 5b2b6ab94e5329556cb948f0e0ac56a0a7f75003..b305811201788032fd2c1017b75d197d2e7b7aed 100644
--- a/roles/bootstrap-os/tasks/bootstrap-opensuse.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-opensuse.yml
@@ -10,7 +10,7 @@
   register: stat_result
 
 - name: Create the /etc/sysconfig/proxy empty file
-  file:
+  file:  # noqa risky-file-permissions
     path: /etc/sysconfig/proxy
     state: touch
   when:
diff --git a/roles/bootstrap-os/tasks/bootstrap-redhat.yml b/roles/bootstrap-os/tasks/bootstrap-redhat.yml
index 5a2bbf55386b74071839b70c34c453e6fd30bf5f..c6bf43ba4a8f70ec699d7cfa56db20af869d5d79 100644
--- a/roles/bootstrap-os/tasks/bootstrap-redhat.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-redhat.yml
@@ -12,6 +12,7 @@
     value: "{{ http_proxy | default(omit) }}"
     state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
     no_extra_spaces: true
+    mode: 0644
   become: true
   when: not skip_http_proxy_on_os_packages
 
@@ -19,7 +20,7 @@
   command: /sbin/subscription-manager status
   register: rh_subscription_status
   changed_when: "rh_subscription_status != 0"
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   become: true
 
 - name: RHEL subscription Organization ID/Activation Key registration
@@ -35,12 +36,13 @@
       service_level_agreement: "{{ rh_subscription_sla }}"
       sync: true
   notify: RHEL auto-attach subscription
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   become: true
   when:
     - rh_subscription_org_id is defined
     - rh_subscription_status.changed
 
+# this task has no_log set to prevent logging security-sensitive information such as subscription passwords
 - name: RHEL subscription Username/Password registration
   redhat_subscription:
     state: present
@@ -54,8 +56,9 @@
       service_level_agreement: "{{ rh_subscription_sla }}"
       sync: true
   notify: RHEL auto-attach subscription
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   become: true
+  no_log: true
   when:
     - rh_subscription_username is defined
     - rh_subscription_status.changed
diff --git a/roles/container-engine/containerd/molecule/default/converge.yml b/roles/container-engine/containerd/molecule/default/converge.yml
index b70dabf8923ab4185a338626befcf5902616004d..26ff82a9ebb826d646bee380e547b32551afebdd 100644
--- a/roles/container-engine/containerd/molecule/default/converge.yml
+++ b/roles/container-engine/containerd/molecule/default/converge.yml
@@ -4,4 +4,4 @@
   become: true
   roles:
     - role: kubespray-defaults
-    - role: containerd
+    - role: container-engine/containerd
diff --git a/roles/container-engine/containerd/tasks/containerd_repo.yml b/roles/container-engine/containerd/tasks/containerd_repo.yml
index d62468f0c3e079c5e679af21707bdacaa580faff..b26bc84c75a4b5dee5be47299964d381300a23fe 100644
--- a/roles/container-engine/containerd/tasks/containerd_repo.yml
+++ b/roles/container-engine/containerd/tasks/containerd_repo.yml
@@ -23,12 +23,14 @@
   template:
     src: "fedora_containerd.repo.j2"
     dest: "{{ yum_repo_dir }}/containerd.repo"
+    mode: 0644
   when: ansible_distribution == "Fedora"
 
 - name: Configure containerd repository on RedHat/OracleLinux/CentOS/AlmaLinux
   template:
     src: "rh_containerd.repo.j2"
     dest: "{{ yum_repo_dir }}/containerd.repo"
+    mode: 0644
   when:
     - ansible_os_family == "RedHat"
     - ansible_distribution not in ["Fedora", "Amazon"]
diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml
index 5becf1f3d33043351a3a7ffdd6f5ccf83e73f44c..e4ba9983bffe627717878940996cfc0a3b371268 100644
--- a/roles/container-engine/containerd/tasks/main.yml
+++ b/roles/container-engine/containerd/tasks/main.yml
@@ -58,11 +58,13 @@
   file:
     path: /etc/systemd/system/containerd.service.d
     state: directory
+    mode: 0755
 
 - name: Write containerd proxy drop-in
   template:
     src: http-proxy.conf.j2
     dest: /etc/systemd/system/containerd.service.d/http-proxy.conf
+    mode: 0644
   notify: restart containerd
   when: http_proxy is defined or https_proxy is defined
 
@@ -116,7 +118,7 @@
     - not is_ostree
     - containerd_package_info.pkgs|length > 0
 
-- include_role:
+- include_role:  # noqa unnamed-task
     name: container-engine/crictl
 
 # you can sometimes end up in a state where everything is installed
diff --git a/roles/container-engine/cri-o/molecule/default/converge.yml b/roles/container-engine/cri-o/molecule/default/converge.yml
index fdb8fb60005527e253402ff93625e7af471a639e..5235ae3309e65a53c6be460e0adfa9b5dfe1de13 100644
--- a/roles/container-engine/cri-o/molecule/default/converge.yml
+++ b/roles/container-engine/cri-o/molecule/default/converge.yml
@@ -4,4 +4,4 @@
   become: true
   roles:
     - role: kubespray-defaults
-    - role: cri-o
+    - role: container-engine/cri-o
diff --git a/roles/container-engine/cri-o/tasks/crio_repo.yml b/roles/container-engine/cri-o/tasks/crio_repo.yml
index 23447ee43f5cdc283c8e6019138e3bed69f9cead..b0ca20725d3c1afccd4def4f85c6d3f2b7d3c26f 100644
--- a/roles/container-engine/cri-o/tasks/crio_repo.yml
+++ b/roles/container-engine/cri-o/tasks/crio_repo.yml
@@ -53,6 +53,7 @@
     option: enabled
     value: "0"
     backup: yes
+    mode: 0644
   when:
     - ansible_distribution in ["Amazon"]
     - amzn2_extras_file_stat.stat.exists
@@ -119,6 +120,7 @@
     section: "{{ item.section }}"
     option: enabled
     value: 1
+    mode: 0644
   become: true
   when: is_ostree
   loop:
diff --git a/roles/container-engine/cri-o/tasks/main.yaml b/roles/container-engine/cri-o/tasks/main.yaml
index c152a5232ec93dda759140674a496a3f57569480..3fda1aeb4870657e187d5961b4d0ede0c5a01977 100644
--- a/roles/container-engine/cri-o/tasks/main.yaml
+++ b/roles/container-engine/cri-o/tasks/main.yaml
@@ -46,7 +46,7 @@
   import_tasks: "crio_repo.yml"
   when: crio_add_repos
 
-- include_role:
+- include_role:  # noqa unnamed-task
     name: container-engine/crictl
 
 - name: Build a list of crio runtimes with Katacontainers runtimes
@@ -69,11 +69,13 @@
   file:
     path: "{{ item }}"
     state: directory
+    mode: 0755
 
 - name: Install cri-o config
   template:
     src: crio.conf.j2
     dest: /etc/crio/crio.conf
+    mode: 0644
   register: config_install
 
 - name: Add skopeo pkg to install
@@ -129,6 +131,7 @@
   copy:
     src: mounts.conf
     dest: /etc/containers/mounts.conf
+    mode: 0644
   when:
     - ansible_os_family == 'RedHat'
   notify: restart crio
@@ -147,6 +150,7 @@
     section: storage.options.overlay
     option: mountopt
     value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}'
+    mode: 0644
 
 - name: Create directory registries configs
   file:
@@ -159,6 +163,7 @@
   template:
     src: registry-mirror.conf.j2
     dest: "/etc/containers/registries.conf.d/{{ item.prefix }}.conf"
+    mode: 0644
   loop: "{{ crio_registries_mirrors }}"
   notify: restart crio
 
@@ -166,6 +171,7 @@
   template:
     src: http-proxy.conf.j2
     dest: /etc/systemd/system/crio.service.d/http-proxy.conf
+    mode: 0644
   notify: restart crio
   when: http_proxy is defined or https_proxy is defined
 
diff --git a/roles/container-engine/docker/molecule/default/converge.yml b/roles/container-engine/docker/molecule/default/converge.yml
index 68c44d26d4ca7378a004335effcf3988d7aeb982..afe7a8eb338c560d622526be17a6409101237882 100644
--- a/roles/container-engine/docker/molecule/default/converge.yml
+++ b/roles/container-engine/docker/molecule/default/converge.yml
@@ -4,4 +4,4 @@
   become: true
   roles:
     - role: kubespray-defaults
-    - role: docker
+    - role: container-engine/docker
diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml
index ee779279e3964e95a265f9201ec891451f50c9dc..39df9e88601dbb5f6c3bd7bc1e2e397a8f47c00c 100644
--- a/roles/container-engine/docker/tasks/main.yml
+++ b/roles/container-engine/docker/tasks/main.yml
@@ -80,12 +80,14 @@
   template:
     src: "fedora_docker.repo.j2"
     dest: "{{ yum_repo_dir }}/docker.repo"
+    mode: 0644
   when: ansible_distribution == "Fedora" and not is_ostree
 
 - name: Configure docker repository on RedHat/CentOS/Oracle/AlmaLinux Linux
   template:
     src: "rh_docker.repo.j2"
     dest: "{{ yum_repo_dir }}/docker-ce.repo"
+    mode: 0644
   when:
     - ansible_os_family == "RedHat"
     - ansible_distribution != "Fedora"
@@ -145,7 +147,7 @@
         state: started
       when: docker_task_result is not changed
   rescue:
-    - debug:
+    - debug:  # noqa unnamed-task
         msg: "Docker start failed. Try to remove our config"
     - name: remove kubespray generated config
       file:
diff --git a/roles/container-engine/docker/tasks/systemd.yml b/roles/container-engine/docker/tasks/systemd.yml
index b98ae23531f78283b9052ce54cc8caa1600fe5c8..a57061c152a90d5a0d91cc05278aba56b84a0fbe 100644
--- a/roles/container-engine/docker/tasks/systemd.yml
+++ b/roles/container-engine/docker/tasks/systemd.yml
@@ -3,11 +3,13 @@
   file:
     path: /etc/systemd/system/docker.service.d
     state: directory
+    mode: 0755
 
 - name: Write docker proxy drop-in
   template:
     src: http-proxy.conf.j2
     dest: /etc/systemd/system/docker.service.d/http-proxy.conf
+    mode: 0644
   notify: restart docker
   when: http_proxy is defined or https_proxy is defined
 
@@ -25,6 +27,7 @@
   template:
     src: docker.service.j2
     dest: /etc/systemd/system/docker.service
+    mode: 0644
   register: docker_service_file
   notify: restart docker
   when:
@@ -35,12 +38,14 @@
   template:
     src: docker-options.conf.j2
     dest: "/etc/systemd/system/docker.service.d/docker-options.conf"
+    mode: 0644
   notify: restart docker
 
 - name: Write docker dns systemd drop-in
   template:
     src: docker-dns.conf.j2
     dest: "/etc/systemd/system/docker.service.d/docker-dns.conf"
+    mode: 0644
   notify: restart docker
   when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
 
@@ -55,7 +60,9 @@
   template:
     src: docker-orphan-cleanup.conf.j2
     dest: "/etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf"
+    mode: 0644
   notify: restart docker
   when: docker_orphan_clean_up | bool
 
-- meta: flush_handlers
+- name: Flush handlers
+  meta: flush_handlers
diff --git a/roles/container-engine/gvisor/molecule/default/converge.yml b/roles/container-engine/gvisor/molecule/default/converge.yml
index 8bf5478e870ae5d3664277f44ad497ebc23a55cf..b14d078a182ca3f96cf016b5075368739565c620 100644
--- a/roles/container-engine/gvisor/molecule/default/converge.yml
+++ b/roles/container-engine/gvisor/molecule/default/converge.yml
@@ -7,5 +7,5 @@
     container_manager: containerd
   roles:
     - role: kubespray-defaults
-    - role: containerd
-    - role: gvisor
+    - role: container-engine/containerd
+    - role: container-engine/gvisor
diff --git a/roles/container-engine/gvisor/molecule/default/prepare.yml b/roles/container-engine/gvisor/molecule/default/prepare.yml
index 084824830cc04bbea76e989c12375be101e0cef1..e5a7e773c5bfa167d5259934c1aa09376d01aa18 100644
--- a/roles/container-engine/gvisor/molecule/default/prepare.yml
+++ b/roles/container-engine/gvisor/molecule/default/prepare.yml
@@ -5,7 +5,7 @@
   roles:
     - role: kubespray-defaults
     - role: bootstrap-os
-    - role: ../adduser
+    - role: adduser
       user: "{{ addusers.kube }}"
   tasks:
     - include_tasks: "../../../../download/tasks/download_file.yml"
@@ -20,8 +20,8 @@
     kube_network_plugin: cni
   roles:
     - role: kubespray-defaults
-    - role: ../network_plugin/cni
-    - role: crictl
+    - role: network_plugin/cni
+    - role: container-engine/crictl
   tasks:
     - name: Copy test container files
       copy:
diff --git a/roles/container-engine/kata-containers/molecule/default/converge.yml b/roles/container-engine/kata-containers/molecule/default/converge.yml
index 99570570517961b15769da16ec731638fc353bd5..3456ee6f82bb7f3889dd29ffa4fc77a1eddb88bb 100644
--- a/roles/container-engine/kata-containers/molecule/default/converge.yml
+++ b/roles/container-engine/kata-containers/molecule/default/converge.yml
@@ -6,5 +6,5 @@
     kata_containers_enabled: true
   roles:
     - role: kubespray-defaults
-    - role: containerd
-    - role: kata-containers
+    - role: container-engine/containerd
+    - role: container-engine/kata-containers
diff --git a/roles/container-engine/kata-containers/tasks/main.yml b/roles/container-engine/kata-containers/tasks/main.yml
index 3e4bce90704399cff2d2b144040163a9e3bdf311..8d99e5255f872464f1f8b416fd9f67bc92fd8c57 100644
--- a/roles/container-engine/kata-containers/tasks/main.yml
+++ b/roles/container-engine/kata-containers/tasks/main.yml
@@ -15,11 +15,13 @@
   file:
     path: "{{ kata_containers_config_dir }}"
     state: directory
+    mode: 0755
 
 - name: kata-containers | Set configuration
   template:
     src: "{{ item }}.j2"
     dest: "{{ kata_containers_config_dir }}/{{ item }}"
+    mode: 0644
   with_items:
     - configuration-qemu.toml
 
diff --git a/roles/container-engine/meta/main.yml b/roles/container-engine/meta/main.yml
index 2f6bff147e5d234cdab16b86c5b4dc8c0866b975..8bd98bd0228740368d4030ed7673b336c600063f 100644
--- a/roles/container-engine/meta/main.yml
+++ b/roles/container-engine/meta/main.yml
@@ -1,3 +1,4 @@
+# noqa role-name - this is a meta role that doesn't need a name
 ---
 dependencies:
   - role: container-engine/kata-containers
diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml
index 21b3cbd22056e866232b299c1e5df2ecf3e98678..79e92632e93649b107fe0bc122a6432938756a30 100644
--- a/roles/download/tasks/download_container.yml
+++ b/roles/download/tasks/download_container.yml
@@ -18,7 +18,7 @@
       when:
         - not download_always_pull
 
-    - debug:
+    - debug:  # noqa unnamed-task
         msg: "Pull {{ image_reponame }} required is: {{ pull_required }}"
 
     - name: download_container | Determine if image is in cache
diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml
index a6725fdcfe0dd7a1ec13ce3c0b1f602603cf1f6a..f7dcfda10ffdc4243107159c31c3878fa72b0ec5 100644
--- a/roles/download/tasks/download_file.yml
+++ b/roles/download/tasks/download_file.yml
@@ -48,6 +48,7 @@
     - not download_localhost
 
   # This must always be called, to check if the checksum matches. On no-match the file is re-downloaded.
+  # This task avoids logging its parameters so that environment passwords are not leaked in the log
   - name: download_file | Download item
     get_url:
       url: "{{ download.url }}"
@@ -67,6 +68,7 @@
     retries: 4
     delay: "{{ retry_stagger | default(5) }}"
     environment: "{{ proxy_env }}"
+    no_log: true
 
   - name: download_file | Copy file back to ansible host file cache
     synchronize:
diff --git a/roles/download/tasks/prep_download.yml b/roles/download/tasks/prep_download.yml
index 475040a5033b3fa78dfa434e954e3301c1c72beb..e57266bd8f36d0d9b160c7d2de91d04102f76776 100644
--- a/roles/download/tasks/prep_download.yml
+++ b/roles/download/tasks/prep_download.yml
@@ -38,7 +38,7 @@
   run_once: true
   register: test_become
   changed_when: false
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   become: true
   when:
     - download_localhost
@@ -53,7 +53,7 @@
   run_once: true
   register: test_docker
   changed_when: false
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   become: false
   when:
     - download_localhost
diff --git a/roles/download/tasks/prep_kubeadm_images.yml b/roles/download/tasks/prep_kubeadm_images.yml
index c520a9416f70703c5a1abccb80318093df874264..aa21849e08fc87fc7b313ab0d03ec363b62d174c 100644
--- a/roles/download/tasks/prep_kubeadm_images.yml
+++ b/roles/download/tasks/prep_kubeadm_images.yml
@@ -18,6 +18,7 @@
   template:
     src: "kubeadm-images.yaml.j2"
     dest: "{{ kube_config_dir }}/kubeadm-images.yaml"
+    mode: 0644
   when:
     - not skip_kubeadm_images|default(false)
 
diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml
index 331dec72f810dc6d1349870da1e240ab3c29c493..7534e4176c1f38a2e9a099c638e1699a1d390d36 100644
--- a/roles/etcd/tasks/configure.yml
+++ b/roles/etcd/tasks/configure.yml
@@ -45,6 +45,7 @@
     src: "etcd-{{ etcd_deployment_type }}.service.j2"
     dest: /etc/systemd/system/etcd.service
     backup: yes
+    mode: 0644
   when: is_etcd_master and etcd_cluster_setup
 
 - name: Configure | Copy etcd-events.service systemd file
@@ -52,6 +53,7 @@
     src: "etcd-events-{{ etcd_deployment_type }}.service.j2"
     dest: /etc/systemd/system/etcd-events.service
     backup: yes
+    mode: 0644
   when: is_etcd_master and etcd_events_cluster_setup
 
 - name: Configure | reload systemd
@@ -65,7 +67,7 @@
     name: etcd
     state: started
     enabled: yes
-  ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}"
+  ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}"  # noqa ignore-errors
   when: is_etcd_master and etcd_cluster_setup
 
 # when scaling new etcd will fail to start
@@ -74,7 +76,7 @@
     name: etcd-events
     state: started
     enabled: yes
-  ignore_errors: "{{ etcd_events_cluster_is_healthy.rc == 0 }}"
+  ignore_errors: "{{ etcd_events_cluster_is_healthy.rc != 0 }}"  # noqa ignore-errors
   when: is_etcd_master and etcd_events_cluster_setup
 
 - name: Configure | Wait for etcd cluster to be healthy
@@ -126,7 +128,7 @@
 - name: Configure | Check if member is in etcd cluster
   shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}"
   register: etcd_member_in_cluster
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   changed_when: false
   check_mode: no
   when: is_etcd_master and etcd_cluster_setup
@@ -142,7 +144,7 @@
 - name: Configure | Check if member is in etcd-events cluster
   shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}"
   register: etcd_events_member_in_cluster
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   changed_when: false
   check_mode: no
   when: is_etcd_master and etcd_events_cluster_setup
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index 2b4e9297c09972672e34dc93c55ddd9f3920cef3..3825e3c62c510c0a6a5b1dce24662ff563a7f23a 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -21,6 +21,7 @@
   template:
     src: "openssl.conf.j2"
     dest: "{{ etcd_config_dir }}/openssl.conf"
+    mode: 0640
   run_once: yes
   delegate_to: "{{ groups['etcd'][0] }}"
   when:
diff --git a/roles/etcd/tasks/refresh_config.yml b/roles/etcd/tasks/refresh_config.yml
index 21c308fb0189f191a5407d577f4dcefdc4e299d0..57010fee15a4da008e0a8b0a672b1f7fca97f4be 100644
--- a/roles/etcd/tasks/refresh_config.yml
+++ b/roles/etcd/tasks/refresh_config.yml
@@ -3,6 +3,7 @@
   template:
     src: etcd.env.j2
     dest: /etc/etcd.env
+    mode: 0640
   notify: restart etcd
   when: is_etcd_master and etcd_cluster_setup
 
@@ -10,5 +11,6 @@
   template:
     src: etcd-events.env.j2
     dest: /etc/etcd-events.env
+    mode: 0640
   notify: restart etcd-events
   when: is_etcd_master and etcd_events_cluster_setup
diff --git a/roles/etcd/tasks/upd_ca_trust.yml b/roles/etcd/tasks/upd_ca_trust.yml
index 0d1ba1231876614e0e6cae96235853e4dccd979e..cfa4965b332c45a320ecc24b29f4729b7e4a3eb7 100644
--- a/roles/etcd/tasks/upd_ca_trust.yml
+++ b/roles/etcd/tasks/upd_ca_trust.yml
@@ -21,6 +21,7 @@
     src: "{{ etcd_cert_dir }}/ca.pem"
     dest: "{{ ca_cert_path }}"
     remote_src: true
+    mode: 0640
   register: etcd_ca_cert
 
 - name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar)  # noqa 503
diff --git a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
index 2f774cfcd8162c79711a390cc078f70597049350..538fc22fcf1195b27669d23f68358662a4decaa9 100644
--- a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
+++ b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
@@ -3,7 +3,7 @@
   shell: "{{ bin_dir }}/kubectl get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
   register: createdby_annotation
   changed_when: false
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   when:
     - dns_mode in ['coredns', 'coredns_dual']
     - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index 2f5f110af62d1432897e0b69fd7ee2703c9aa92c..c477c2a419844db0cfb05c3d6c73f23c6dc85819 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -30,6 +30,7 @@
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ kube_config_dir }}/{{ item.file }}"
+    mode: 0640
   register: psp_manifests
   with_items:
     - {file: psp.yml, type: psp, name: psp}
@@ -61,6 +62,7 @@
   template:
     src: "node-crb.yml.j2"
     dest: "{{ kube_config_dir }}/node-crb.yml"
+    mode: 0640
   register: node_crb_manifest
   when:
     - rbac_enabled
@@ -86,6 +88,7 @@
   template:
     src: "node-webhook-cr.yml.j2"
     dest: "{{ kube_config_dir }}/node-webhook-cr.yml"
+    mode: 0640
   register: node_webhook_cr_manifest
   when:
     - rbac_enabled
@@ -111,6 +114,7 @@
   template:
     src: "node-webhook-crb.yml.j2"
     dest: "{{ kube_config_dir }}/node-webhook-crb.yml"
+    mode: 0640
   register: node_webhook_crb_manifest
   when:
     - rbac_enabled
@@ -139,7 +143,7 @@
     - cloud_provider == 'oci'
 
 - name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
-  copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml
+  copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml mode=0640
   when: inventory_hostname == groups['kube_control_plane']|last
 
 - name: PriorityClass | Create k8s-cluster-critical
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/oci.yml b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
index 72142eae63d6c85ff8a610e88219dd42e3189879..eb074634e7c5b6fd2eb867057c54b72300bca54d 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
@@ -3,6 +3,7 @@
   copy:
     src: "oci-rbac.yml"
     dest: "{{ kube_config_dir }}/oci-rbac.yml"
+    mode: 0640
   when:
   - cloud_provider is defined
   - cloud_provider == 'oci'
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
index 15b2ecf2b0eedc661846ebfffeaecf1bf6246221..1c1534698dd3e129670b5ae5ef03627b1c3b7d37 100644
--- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
@@ -12,7 +12,7 @@
 - name: CephFS Provisioner | Remove legacy namespace
   shell: |
     {{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
   tags:
@@ -21,7 +21,7 @@
 - name: CephFS Provisioner | Remove legacy storageclass
   shell: |
     {{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
   tags:
diff --git a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml
index e25e0b1437346f81a31d34fc90e254ae811428ca..06bc1884900f0605059dcf3c97cbb6a501bc880c 100644
--- a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml
@@ -12,7 +12,7 @@
 - name: RBD Provisioner | Remove legacy namespace
   shell: |
     {{ bin_dir }}/kubectl delete namespace {{ rbd_provisioner_namespace }}
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
   tags:
@@ -21,7 +21,7 @@
 - name: RBD Provisioner | Remove legacy storageclass
   shell: |
     {{ bin_dir }}/kubectl delete storageclass {{ rbd_provisioner_storage_class }}
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
   tags:
@@ -63,6 +63,7 @@
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.file }}"
+    mode: 0644
   with_items: "{{ rbd_provisioner_templates }}"
   register: rbd_provisioner_manifests
   when: inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
index 42112b0d5ea1574d88f15357c51dbd516cd8c7c5..4217c6075124bd46c6726980e913ed39e726b98e 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
@@ -12,7 +12,7 @@
 - name: Cert Manager | Remove legacy namespace
   shell: |
     {{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
   tags:
diff --git a/roles/kubernetes-apps/metallb/tasks/main.yml b/roles/kubernetes-apps/metallb/tasks/main.yml
index 12e7045414e16f313b2865483f8a220e5ab68e78..551f2f28ae34b4d18c115db141cc35be1e792c6f 100644
--- a/roles/kubernetes-apps/metallb/tasks/main.yml
+++ b/roles/kubernetes-apps/metallb/tasks/main.yml
@@ -55,7 +55,7 @@
   command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system get secret memberlist"
   register: metallb_secret
   become: true
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
 
diff --git a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
index 3e483bf7fa891d245b6dc741e11c2d90e8afc8c4..45a64d2b2a96186f81d9adad2781257ab113653f 100644
--- a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
@@ -12,12 +12,12 @@
   run_once: true
 
 - name: kube-router | Wait for kube-router pods to be ready
-  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"   # noqa 601
+  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"   # noqa 601 ignore-errors
   register: pods_not_ready
   until: pods_not_ready.stdout.find("kube-router")==-1
   retries: 30
   delay: 10
-  ignore_errors: yes
+  ignore_errors: true
   delegate_to: "{{ groups['kube_control_plane'] | first }}"
   run_once: true
   changed_when: false
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml
index 1e1dda97fb33b44929b5e66628a6e2f1711f15b5..36bb62798ecd7010c7aa538ab9a494e9b7232839 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml
@@ -12,7 +12,7 @@
     - apiserver-kubelet-client.key
     - front-proxy-client.crt
     - front-proxy-client.key
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
 
 - name: Backup old confs
   copy:
@@ -25,4 +25,4 @@
     - controller-manager.conf
     - kubelet.conf
     - scheduler.conf
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
index b362a2a4908e1d1bf938096abd6c566631915f62..6176ba893665828f3f9b8ce28ff69bd2beb0200b 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -50,18 +50,21 @@
   file:
     path: "{{ audit_policy_file | dirname }}"
     state: directory
+    mode: 0750
   when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)
 
 - name: Write api audit policy yaml
   template:
     src: apiserver-audit-policy.yaml.j2
     dest: "{{ audit_policy_file }}"
+    mode: 0640
   when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)
 
 - name: Write api audit webhook config yaml
   template:
     src: apiserver-audit-webhook-config.yaml.j2
     dest: "{{ audit_webhook_config_file }}"
+    mode: 0640
   when: kubernetes_audit_webhook|default(false)
 
 # Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint.
diff --git a/roles/kubernetes/control-plane/tasks/main.yml b/roles/kubernetes/control-plane/tasks/main.yml
index a073b5ded0c939c333f9a08760bc3ff7ae483ee0..ea2dd2d02597db4414c65ea952262ef61447f478 100644
--- a/roles/kubernetes/control-plane/tasks/main.yml
+++ b/roles/kubernetes/control-plane/tasks/main.yml
@@ -7,12 +7,14 @@
   template:
     src: webhook-token-auth-config.yaml.j2
     dest: "{{ kube_config_dir }}/webhook-token-auth-config.yaml"
+    mode: 0640
   when: kube_webhook_token_auth|default(false)
 
 - name: Create webhook authorization config
   template:
     src: webhook-authorization-config.yaml.j2
     dest: "{{ kube_config_dir }}/webhook-authorization-config.yaml"
+    mode: 0640
   when: kube_webhook_authorization|default(false)
 
 - name: Create kube-scheduler config
@@ -40,7 +42,7 @@
   when: ansible_os_family in ["Debian","RedHat"]
   tags:
     - kubectl
-  ignore_errors: True
+  ignore_errors: true  # noqa ignore-errors
 
 - name: Set kubectl bash completion file permissions
   file:
@@ -52,7 +54,7 @@
   tags:
     - kubectl
     - upgrade
-  ignore_errors: True
+  ignore_errors: true  # noqa ignore-errors
 
 - name: Disable SecurityContextDeny admission-controller and enable PodSecurityPolicy
   set_fact:
@@ -77,12 +79,13 @@
   template:
     src: k8s-certs-renew.sh.j2
     dest: "{{ bin_dir }}/k8s-certs-renew.sh"
-    mode: '755'
+    mode: 0755
 
 - name: Renew K8S control plane certificates monthly 1/2
   template:
     src: "{{ item }}.j2"
     dest: "/etc/systemd/system/{{ item }}"
+    mode: 0644
   with_items:
     - k8s-certs-renew.service
     - k8s-certs-renew.timer
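
The mode change above ('755' to 0755) is worth spelling out, because YAML and Ansible treat the variants differently: a bare 0755 is a YAML octal literal, a quoted '0755' (or '755') is a string that the file modules parse as octal, but an unquoted 755 is the decimal number 755 and yields permissions nobody intends. A hypothetical task illustrating the spellings:

- name: Illustrate mode spellings (hypothetical path)
  file:
    path: /tmp/example
    state: touch
    mode: 0644        # YAML octal literal - the form this patch standardizes on
    # mode: '0644'    # quoted octal string - equally safe
    # mode: 644       # bare decimal - equals 0o1204, almost certainly wrong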
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 5cb654320a28e2d029583bc177831d0ba9f47335..6a02f0dab7a3ea737a1532280a45b7ed35a15020 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -61,6 +61,7 @@
     src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2"
     dest: "{{ kube_config_dir }}/kubeadm-client.conf"
     backup: yes
+    mode: 0640
   when: not is_kube_master
 
 - name: Join to cluster if needed
diff --git a/roles/kubernetes/node-label/tasks/main.yml b/roles/kubernetes/node-label/tasks/main.yml
index d01fda83588c4d8c1238d644c5cb7b70947bd46d..b7f8138a66e9479be78a88510773826258aca26e 100644
--- a/roles/kubernetes/node-label/tasks/main.yml
+++ b/roles/kubernetes/node-label/tasks/main.yml
@@ -35,8 +35,10 @@
     - node_labels is defined
     - node_labels is mapping
 
-- debug: var=role_node_labels
-- debug: var=inventory_node_labels
+- debug:  # noqa unnamed-task
+    var: role_node_labels
+- debug:  # noqa unnamed-task
+    var: inventory_node_labels
 
 - name: Set label to node
   command: >-
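
The noqa unnamed-task tags silence ansible-lint 5's naming rule for these debug calls. Where a tag is not wanted, simply naming the task satisfies the rule; a sketch with illustrative names:

- name: Show node labels coming from the role
  debug:
    var: role_node_labels

- name: Show node labels coming from the inventory
  debug:
    var: inventory_node_labels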
diff --git a/roles/kubernetes/node/tasks/kubelet.yml b/roles/kubernetes/node/tasks/kubelet.yml
index 8bff4077c1efae3f5f554629f6739e24e98bf15a..88204e012e1f21971ea4120ac8a3e06115001de0 100644
--- a/roles/kubernetes/node/tasks/kubelet.yml
+++ b/roles/kubernetes/node/tasks/kubelet.yml
@@ -18,6 +18,7 @@
     src: "kubelet.env.{{ kubeletConfig_api_version }}.j2"
     dest: "{{ kube_config_dir }}/kubelet.env"
     backup: yes
+    mode: 0640
   notify: Node | restart kubelet
   tags:
     - kubelet
@@ -27,6 +28,7 @@
   template:
     src: "kubelet-config.{{ kubeletConfig_api_version }}.yaml.j2"
     dest: "{{ kube_config_dir }}/kubelet-config.yaml"
+    mode: 0640
   notify: Kubelet | restart kubelet
   tags:
     - kubelet
@@ -37,6 +39,7 @@
     src: "kubelet.service.j2"
     dest: "/etc/systemd/system/kubelet.service"
     backup: "yes"
+    mode: 0644
   notify: Node | restart kubelet
   tags:
     - kubelet
diff --git a/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml b/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml
index 972878bf79f48165c15004e5e8489e238afab947..67f40f6ddac24d601ba20bc10e937064dbfad9e4 100644
--- a/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml
+++ b/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml
@@ -31,3 +31,4 @@
   template:
     src: manifests/haproxy.manifest.j2
     dest: "{{ kube_manifest_dir }}/haproxy.yml"
+    mode: 0640
diff --git a/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml b/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml
index f90084cbc597799c88533fb89ef48adedd04beae..e176cb9767adf26110ec27aa4e37fa2dc209ec8f 100644
--- a/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml
+++ b/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml
@@ -31,3 +31,4 @@
   template:
     src: manifests/nginx-proxy.manifest.j2
     dest: "{{ kube_manifest_dir }}/nginx-proxy.yml"
+    mode: 0640
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index d4b7f50141f96ac29520962789d1b93b82ecb1be..4cb29d65ab213ea11cfc1da186ef21b0844f6a18 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -57,6 +57,7 @@
   file:
     path: /etc/modules-load.d
     state: directory
+    mode: 0755
 
 - name: Enable br_netfilter module
   modprobe:
@@ -68,6 +69,7 @@
   copy:
     dest: /etc/modules-load.d/kubespray-br_netfilter.conf
     content: br_netfilter
+    mode: 0644
   when: modinfo_br_netfilter.rc == 0
 
 # kube-proxy needs net.bridge.bridge-nf-call-iptables enabled when found if br_netfilter is not a module
@@ -108,7 +110,7 @@
     name: nf_conntrack_ipv4
     state: present
   register: modprobe_nf_conntrack_ipv4
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - kube_proxy_mode == 'ipvs'
   tags:
@@ -117,6 +119,7 @@
 - name: Persist ip_vs modules
   copy:
     dest: /etc/modules-load.d/kube_proxy-ipvs.conf
+    mode: 0644
     content: |
       ip_vs
       ip_vs_rr
diff --git a/roles/kubernetes/preinstall/tasks/0010-swapoff.yml b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
index 73028e0f6982f0a18ec79d997417d76aa5467656..74789319ead768eca043b1dbc97bf8490c3dcc3d 100644
--- a/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
+++ b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
@@ -16,4 +16,4 @@
 - name: Disable swap
   command: /sbin/swapoff -a
   when: swapon.stdout
-  ignore_errors: "{{ ansible_check_mode }}"
+  ignore_errors: "{{ ansible_check_mode }}"  # noqa ignore-errors
diff --git a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
index f184670ab7a08e27df58dffc91f902dfa4c9acd3..312df995af88a78287273d93069a7b5135cd4ea7 100644
--- a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
+++ b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
@@ -4,6 +4,7 @@
     path: "{{ item }}"
     state: directory
     owner: kube
+    mode: 0755
   when: inventory_hostname in groups['k8s_cluster']
   become: true
   tags:
@@ -28,6 +29,7 @@
     path: "{{ item }}"
     state: directory
     owner: root
+    mode: 0755
   when: inventory_hostname in groups['k8s_cluster']
   become: true
   tags:
@@ -59,6 +61,7 @@
     src: "{{ kube_cert_dir }}"
     dest: "{{ kube_cert_compat_dir }}"
     state: link
+    mode: 0755
   when:
     - inventory_hostname in groups['k8s_cluster']
     - kube_cert_dir != kube_cert_compat_dir
@@ -69,6 +72,7 @@
     path: "{{ item }}"
     state: directory
     owner: kube
+    mode: 0755
   with_items:
     - "/etc/cni/net.d"
     - "/opt/cni/bin"
diff --git a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
index 39921595ec408319414625d17919cada811d6027..332f49d8670f5641fc58cac256fb97c9b2e25b74 100644
--- a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
+++ b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
@@ -18,6 +18,7 @@
     create: yes
     backup: yes
     marker: "# Ansible entries {mark}"
+    mode: 0644
   notify: Preinstall | propagate resolvconf to k8s components
 
 - name: Remove search/domain/nameserver options before block
diff --git a/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml b/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml
index b8b673bd232052fa7fbd25a310867a84e5a66b85..d24a4fffa1c8b22edb80858662553878214be9e5 100644
--- a/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml
+++ b/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml
@@ -19,6 +19,7 @@
       [keyfile]
       unmanaged-devices+=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico
     dest: /etc/NetworkManager/conf.d/calico.conf
+    mode: 0644
   when:
     - nm_check.rc == 0
     - kube_network_plugin == "calico"
@@ -32,5 +33,6 @@
       [keyfile]
       unmanaged-devices+=interface-name:kube-ipvs0;interface-name:nodelocaldns
     dest: /etc/NetworkManager/conf.d/k8s.conf
+    mode: 0644
   when: nm_check.rc == 0
   notify: Preinstall | reload NetworkManager
diff --git a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
index 051b9aa351e084e4ba227e2c5f962fc964e6e55a..ddc33fa329b5f2a858319b9b0b4c57c4c79e8e9d 100644
--- a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
+++ b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
@@ -30,6 +30,7 @@
     state: present
     create: yes
     backup: yes
+    mode: 0644
   when:
     - disable_ipv6_dns
     - not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]
@@ -59,6 +60,7 @@
   file:
     name: "{{ sysctl_file_path | dirname }}"
     state: directory
+    mode: 0755
 
 - name: Enable ip forwarding
   sysctl:
diff --git a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
index 95bc711dcdad26c5e8c8d09aa776963825a6ceb1..32a2c8e775ff3ec5e338ea5e501dc226078c13dd 100644
--- a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
+++ b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
@@ -22,6 +22,7 @@
     backup: yes
     unsafe_writes: yes
     marker: "# Ansible inventory hosts {mark}"
+    mode: 0644
   when: populate_inventory_to_hosts_file
 
 - name: Hosts | populate kubernetes loadbalancer address into hosts file
diff --git a/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
index 6599a21d4f2b83057079956c691ae5492a6ac883..28aed07407b3161fef9188677b1362f80b2b2973 100644
--- a/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
+++ b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
@@ -11,6 +11,7 @@
     insertbefore: BOF
     backup: yes
     marker: "# Ansible entries {mark}"
+    mode: 0644
   notify: Preinstall | propagate resolvconf to k8s components
   when: dhclientconffile is defined
 
diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml
index 3d345122179e238b23866600156d7f81d0613492..bf15a7055dafc11617769bb764ed67d8ff95ee99 100644
--- a/roles/kubernetes/preinstall/tasks/main.yml
+++ b/roles/kubernetes/preinstall/tasks/main.yml
@@ -91,7 +91,8 @@
 
 # We need to make sure the network is restarted early enough so that docker can later pick up the correct system
 # nameservers and search domains
-- meta: flush_handlers
+- name: Flush handlers
+  meta: flush_handlers
 
 - name: Check if we are running inside an Azure VM
   stat:
diff --git a/roles/network_plugin/cilium/tasks/apply.yml b/roles/network_plugin/cilium/tasks/apply.yml
index 9824337e225c7486787dd6390f8bbab1878e7e21..4715603d902b8bdaeb7c980f556a36f16abd6444 100644
--- a/roles/network_plugin/cilium/tasks/apply.yml
+++ b/roles/network_plugin/cilium/tasks/apply.yml
@@ -16,7 +16,7 @@
   until: pods_not_ready.stdout.find("cilium")==-1
   retries: 30
   delay: 10
-  ignore_errors: yes
+  failed_when: false
   when: inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Cilium | Hubble install
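
Unlike the noqa-tagged tasks elsewhere in this patch, this hunk switches to failed_when: false. The distinction shows in the run output: ignore_errors: true still reports the task as failed (and counts it under ignored in the recap), while failed_when: false redefines the result as a success. A contrast sketch with an illustrative command:

- name: Tolerated failure - shown as failed, play continues
  command: /bin/false
  ignore_errors: true

- name: Redefined failure - shown as ok
  command: /bin/false
  failed_when: false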
diff --git a/roles/network_plugin/kube-router/tasks/main.yml b/roles/network_plugin/kube-router/tasks/main.yml
index f107eed64c44086901385f74e65b04b089ae2fa5..e331f2b149226c590e75740b5b99fd8d440a1df4 100644
--- a/roles/network_plugin/kube-router/tasks/main.yml
+++ b/roles/network_plugin/kube-router/tasks/main.yml
@@ -23,7 +23,7 @@
   slurp:
     src: /etc/cni/net.d/10-kuberouter.conflist
   register: cni_config_slurp
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
 
 - name: kube-router | Set cni_config variable
   set_fact:
diff --git a/roles/recover_control_plane/etcd/tasks/main.yml b/roles/recover_control_plane/etcd/tasks/main.yml
index a0e7025750ea93b96e07517b8eae73710dff1344..e3dc339307d61f03ed46c8cf8f41894d880996fa 100644
--- a/roles/recover_control_plane/etcd/tasks/main.yml
+++ b/roles/recover_control_plane/etcd/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Get etcd endpoint health
   command: "{{ bin_dir }}/etcdctl endpoint health"
   register: etcd_endpoint_health
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   changed_when: false
   check_mode: no
   environment:
@@ -38,13 +38,13 @@
     state: absent
   delegate_to: "{{ item }}"
   with_items: "{{ groups['broken_etcd'] }}"
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   when:
     - groups['broken_etcd']
     - has_quorum
 
 - name: Delete old certificates
-  # noqa 302 - rm is ok here for now
+  # noqa 302 ignore-errors - rm is ok here for now
   shell: "rm {{ etcd_cert_dir }}/*{{ item }}*"
   with_items: "{{ groups['broken_etcd'] }}"
   register: delete_old_cerificates
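
The noqa 302 keeps the shell-based rm that ansible-lint flags because module equivalents exist. If the shell-out were ever replaced, a find-plus-file pair would be the module-native spelling; a sketch over the same directory variable (the register name is illustrative):

- name: Find old certificates
  find:
    paths: "{{ etcd_cert_dir }}"
    patterns: "*{{ item }}*"
  with_items: "{{ groups['broken_etcd'] }}"
  register: old_etcd_certs

- name: Delete old certificates
  file:
    path: "{{ item.path }}"
    state: absent
  loop: "{{ old_etcd_certs.results | map(attribute='files') | flatten }}"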
diff --git a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
index bef89f192bb4236f2dfcab60d22487900b14283e..1ecc90fef7e47e0437050886fa2166ac04117150 100644
--- a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
+++ b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
@@ -13,6 +13,7 @@
   copy:
     src: "{{ etcd_snapshot }}"
     dest: /tmp/snapshot.db
+    mode: 0640
   when: etcd_snapshot is defined
 
 - name: Stop etcd
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index fd4c6fc582bab127e4a8ace199d873900dbac5d5..3205c008fa72e0aa2c15467ccd978b23c16b6e5d 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: Delete node  # noqa 301
+- name: Delete node  # noqa 301 ignore-errors
   command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube_control_plane']|first }}"
-  ignore_errors: yes
\ No newline at end of file
+  ignore_errors: true
diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml b/roles/remove-node/remove-etcd-node/tasks/main.yml
index 5c800044f828d7b247e709a5009656ca26cd55fd..c69dd906957917ca22b31af1811c95b247552e0a 100644
--- a/roles/remove-node/remove-etcd-node/tasks/main.yml
+++ b/roles/remove-node/remove-etcd-node/tasks/main.yml
@@ -27,7 +27,7 @@
 - name: Lookup etcd member id
   shell: "{{ bin_dir }}/etcdctl member list | grep {{ node_ip }} | cut -d, -f1"
   register: etcd_member_id
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   changed_when: false
   check_mode: no
   tags:
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 0d17d2e88d90e16f8ab0688830a486e685ea2213..00029b09b7b64ff52e378dff02deca19f7e1f8f2 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -86,7 +86,7 @@
   when:
     - crictl.stat.exists
     - container_manager in ["crio", "containerd"]
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
 
 - name: reset | force remove all cri containers
   command: "{{ bin_dir }}/crictl rm -a -f"
@@ -129,7 +129,7 @@
   when:
     - crictl.stat.exists
     - container_manager == "containerd"
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
 
 - block:
     - name: reset | force remove all cri pods
@@ -206,7 +206,7 @@
 
 - name: Clear IPVS virtual server table
   command: "ipvsadm -C"
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s_cluster']
 
@@ -306,7 +306,7 @@
     - /etc/modules-load.d/kube_proxy-ipvs.conf
     - /etc/modules-load.d/kubespray-br_netfilter.conf
     - /usr/libexec/kubernetes
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   tags:
     - files
 
@@ -333,7 +333,7 @@
     - dns
 
 - name: reset | include file with reset tasks specific to the network_plugin if exists
-  include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/reset.yml') | realpath  }}"
+  include_tasks: "{{ (role_path, '../network_plugin', kube_network_plugin, 'tasks/reset.yml') | path_join | realpath }}"
   when:
     - kube_network_plugin in ['flannel', 'cilium', 'kube-router', 'calico']
   tags:
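
The include path above also moves from string concatenation to the path_join filter, available since Ansible 2.10, which joins components with the platform separator before realpath resolves the ../ hop on the controller. An illustrative evaluation with hypothetical values:

- name: Show how path_join assembles a path (hypothetical components)
  debug:
    msg: "{{ ('/etc/ansible/roles/reset', '../network_plugin', 'calico', 'tasks/reset.yml') | path_join }}"

This prints /etc/ansible/roles/reset/../network_plugin/calico/tasks/reset.yml, which realpath then normalizes.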
diff --git a/roles/win_nodes/kubernetes_patch/tasks/main.yml b/roles/win_nodes/kubernetes_patch/tasks/main.yml
index 32f511a4e3ddcd69bf153d32221734508c643be8..77da68352fb472d92873fc0a086248a5473747e6 100644
--- a/roles/win_nodes/kubernetes_patch/tasks/main.yml
+++ b/roles/win_nodes/kubernetes_patch/tasks/main.yml
@@ -29,10 +29,12 @@
       register: patch_kube_proxy_state
       when: current_kube_proxy_state.stdout | trim | lower != "linux"
 
-    - debug: msg={{ patch_kube_proxy_state.stdout_lines }}
+    - debug:  # noqa unnamed-task
+        msg: "{{ patch_kube_proxy_state.stdout_lines }}"
       when: patch_kube_proxy_state is not skipped
 
-    - debug: msg={{ patch_kube_proxy_state.stderr_lines }}
+    - debug:  # noqa unnamed-task
+        msg: "{{ patch_kube_proxy_state.stderr_lines }}"
       when: patch_kube_proxy_state is not skipped
   tags: init
   when:
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index 957c1aed7b7f76e11eb911075b6430c14811549c..3f31217d308122b8849f5c2aaa08a221652315c6 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -135,6 +135,7 @@
         path: "/tmp/{{ archive_dirname }}"
         dest: "{{ dir|default('.') }}/logs.tar.gz"
         remove: true
+        mode: 0640
       delegate_to: localhost
       connection: local
       become: false
diff --git a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
index e2ef4e8fc7855ea6f2b53f4d900d2ee2e7f9fb67..a0b36bebb6f6ac59f8c3525a6b4d45f21590a926 100644
--- a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
+++ b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
@@ -4,6 +4,7 @@
   file:
     state: directory
     path: "{{ images_dir }}"
+    mode: 0755
 
 - name: Download images files
   get_url:
@@ -39,6 +40,7 @@
   template:
     src: Dockerfile
     dest: "{{ images_dir }}/Dockerfile"
+    mode: 0644
 
 - name: Create docker images for each OS  # noqa 301
   command: docker build -t {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
diff --git a/tests/cloud_playbooks/create-aws.yml b/tests/cloud_playbooks/create-aws.yml
index a1982edfabc07f55c3669b6511fb98b419604fee..8a03c92594a8a69f5783c9a0dc8a793c962e9a00 100644
--- a/tests/cloud_playbooks/create-aws.yml
+++ b/tests/cloud_playbooks/create-aws.yml
@@ -22,3 +22,4 @@
     template:
       src: ../templates/inventory-aws.j2  # noqa 404 CI inventory templates are not in role_path
       dest: "{{ inventory_path }}"
+      mode: 0644
diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml
index 3b58aa0d37470f100aef5cf9cce3338529e4672b..3726eb158557fc5c66db6eef1230f251aa409e0b 100644
--- a/tests/cloud_playbooks/create-do.yml
+++ b/tests/cloud_playbooks/create-do.yml
@@ -79,7 +79,7 @@
       register: droplets
       with_items: "{{ instance_names }}"
 
-    - debug:
+    - debug:  # noqa unnamed-task
         msg: "{{ droplets }}, {{ inventory_path }}"
       when: state == 'present'
 
@@ -87,4 +87,5 @@
       template:
         src: ../templates/inventory-do.j2  # noqa 404 CI templates are not in role_path
         dest: "{{ inventory_path }}"
+        mode: 0644
       when: state == 'present'
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index f9f474f83ee86290fafcb08fe8a4bf5447c2fb18..f94b05bcb57ebb96505b165fe6750c8565148d11 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -28,7 +28,7 @@
           {%- endif -%}
 
     - name: Create gce instances
-      gce:
+      google.cloud.gcp_compute_instance:
         instance_names: "{{ instance_names }}"
         machine_type: "{{ cloud_machine_type }}"
         image: "{{ cloud_image | default(omit) }}"
@@ -53,17 +53,20 @@
       template:
         src: ../templates/inventory-gce.j2
         dest: "{{ inventory_path }}"
+        mode: 0644
 
     - name: Make group_vars directory
       file:
         path: "{{ inventory_path|dirname }}/group_vars"
         state: directory
+        mode: 0755
       when: mode in ['scale', 'separate-scale', 'ha-scale']
 
     - name: Template fake hosts group vars  # noqa 404 CI templates are not in role_path
       template:
         src: ../templates/fake_hosts.yml.j2
         dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"
+        mode: 0644
       when: mode in ['scale', 'separate-scale', 'ha-scale']
 
     - name: Delete group_vars directory
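
The rename from gce to google.cloud.gcp_compute_instance reflects the legacy gce module no longer shipping with current Ansible releases, but the two are not parameter-compatible: the collection module takes a single name, requires zone/project/auth settings, and describes the boot disk explicitly. A minimal creation sketch; gce_project_id, gce_credentials_file, the zone and the image link are assumptions, not values from this playbook:

- name: Create a gce instance (gcp_compute_instance sketch)
  google.cloud.gcp_compute_instance:
    name: "{{ item }}"
    machine_type: "{{ cloud_machine_type }}"
    zone: us-central1-b                                   # assumed zone
    project: "{{ gce_project_id }}"                       # assumed variable
    auth_kind: serviceaccount
    service_account_file: "{{ gce_credentials_file }}"    # assumed variable
    state: present
    disks:
      - auto_delete: true
        boot: true
        initialize_params:
          # full image link, not the short name the old image param accepted
          source_image: projects/ubuntu-os-cloud/global/images/family/ubuntu-2004-lts
    network_interfaces:
      - access_configs:
          - name: External NAT
            type: ONE_TO_ONE_NAT
  loop: "{{ instance_names }}"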
diff --git a/tests/cloud_playbooks/delete-gce.yml b/tests/cloud_playbooks/delete-gce.yml
index 00e671240008fe2c736a60b01d4eed9b6f4abe77..b88abea1c7a7ef0530ee2f374d1e0d9a2a26518b 100644
--- a/tests/cloud_playbooks/delete-gce.yml
+++ b/tests/cloud_playbooks/delete-gce.yml
@@ -20,7 +20,7 @@
           {%- endif -%}
 
     - name: stop gce instances
-      gce:
+      google.cloud.gcp_compute_instance:
         instance_names: "{{ instance_names }}"
         image: "{{ cloud_image | default(omit) }}"
         service_account_email: "{{ gce_service_account_email }}"
@@ -34,7 +34,7 @@
       register: gce
 
     - name: delete gce instances
-      gce:
+      google.cloud.gcp_compute_instance:
         instance_names: "{{ instance_names }}"
         image: "{{ cloud_image | default(omit) }}"
         service_account_email: "{{ gce_service_account_email }}"
diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
index bbbce6e01eca9a62e3a50c22de7e89b63fe96597..d939db02ceeefd05760d1efbc10bb86964bd0d72 100644
--- a/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
+++ b/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
@@ -12,11 +12,13 @@
   file:
     path: "/tmp/{{ test_name }}"
     state: directory
+    mode: 0755
 
 - name: Template vm files for CI job
   template:
     src: "vm.yml.j2"
     dest: "/tmp/{{ test_name }}/instance-{{ vm_id }}.yml"
+    mode: 0644
   loop: "{{ range(1, vm_count|int + 1, 1) | list }}"
   loop_control:
     index_var: vm_id
@@ -47,5 +49,6 @@
   template:
     src: "inventory.j2"
     dest: "{{ inventory_path }}"
+    mode: 0644
   vars:
     vms: "{{ vm_ips }}"
diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml
index 679f80558eec7aaf5f6323e3633eb3b845ce37ed..eeb0edb799fdc350647e3a4e637797e4f7f879ea 100644
--- a/tests/cloud_playbooks/upload-logs-gcs.yml
+++ b/tests/cloud_playbooks/upload-logs-gcs.yml
@@ -33,11 +33,13 @@
       template:
         src: gcs_life.json.j2
         dest: "{{ dir }}/gcs_life.json"
+        mode: 0644
 
     - name: Create a boto config to access GCS
       template:
         src: boto.j2
         dest: "{{ dir }}/.boto"
+        mode: 0640
       no_log: True
 
     - name: Download gsutil cp installer
@@ -74,5 +76,5 @@
       failed_when: false
       no_log: True
 
-    - debug:
+    - debug:  # noqa unnamed-task
         msg: "A public url https://storage.googleapis.com/{{ test_name }}/{{ file_name }}"
diff --git a/tests/requirements.txt b/tests/requirements.txt
index 6519907567d0e8aac8da41fc8a04c1b8474637d4..2524ef93ce6925b2eae31b602918da8cbccf23c4 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -4,7 +4,7 @@ apache-libcloud==2.2.1
 tox==3.11.1
 dopy==0.3.7
 cryptography==2.8
-ansible-lint==4.2.0
+ansible-lint==5.0.11
 openshift==0.8.8
 molecule==3.0.6
 molecule-vagrant==0.3
diff --git a/tests/scripts/testcases_prepare.sh b/tests/scripts/testcases_prepare.sh
index 454315783dd909513f37f0cba2542f821c91c2eb..d70086a2b1de6630457d931b0528fde355e60b64 100755
--- a/tests/scripts/testcases_prepare.sh
+++ b/tests/scripts/testcases_prepare.sh
@@ -1,6 +1,7 @@
 #!/bin/bash
 set -euxo pipefail
 
+/usr/bin/python -m pip uninstall -y ansible
 /usr/bin/python -m pip install -r tests/requirements.txt
 mkdir -p /.ssh
 mkdir -p cluster-dump
diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml
index adf0a35c92d5db2e19775fda6d9fcdaed251b237..e84bad264346e9d27abde39fa68f693183086032 100644
--- a/tests/testcases/010_check-apiserver.yml
+++ b/tests/testcases/010_check-apiserver.yml
@@ -9,7 +9,7 @@
       status_code: 200
     register: apiserver_response
 
-  - debug:
+  - debug:  # noqa unnamed-task
       msg: "{{ apiserver_response.json }}"
 
   - name: Check API servers version
diff --git a/tests/testcases/015_check-nodes-ready.yml b/tests/testcases/015_check-nodes-ready.yml
index 0faa1d46b99bfed4cffd3afdfa51d63489a2f901..34f592394a583e2463b7f1a27d0f63cbc319b26f 100644
--- a/tests/testcases/015_check-nodes-ready.yml
+++ b/tests/testcases/015_check-nodes-ready.yml
@@ -12,7 +12,7 @@
       bin_dir: "/usr/local/bin"
     when: not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]
 
-  - import_role:
+  - import_role:  # noqa unnamed-task
       name: cluster-dump
 
   - name: Check kubectl output
@@ -21,7 +21,7 @@
     register: get_nodes
     no_log: true
 
-  - debug:
+  - debug:  # noqa unnamed-task
       msg: "{{ get_nodes.stdout.split('\n') }}"
 
   - name: Check that all nodes are running and ready
diff --git a/tests/testcases/020_check-pods-running.yml b/tests/testcases/020_check-pods-running.yml
index edea22a5c024effacf6a8e15faa9522c1bc25ef8..c83c9be9011cb92b850855d5b0a6842aaf9de9cf 100644
--- a/tests/testcases/020_check-pods-running.yml
+++ b/tests/testcases/020_check-pods-running.yml
@@ -12,7 +12,7 @@
       bin_dir: "/usr/local/bin"
     when: not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]
 
-  - import_role:
+  - import_role:  # noqa unnamed-task
       name: cluster-dump
 
   - name: Check kubectl output
@@ -21,7 +21,7 @@
     register: get_pods
     no_log: true
 
-  - debug:
+  - debug:  # noqa unnamed-task
       msg: "{{ get_pods.stdout.split('\n') }}"
 
   - name: Check that all pods are running and ready
@@ -44,6 +44,6 @@
     register: get_pods
     no_log: true
 
-  - debug:
+  - debug:  # noqa unnamed-task
       msg: "{{ get_pods.stdout.split('\n') }}"
     failed_when: not run_pods_log is success
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index a9386db88f8949dada943de598d92c69fc169028..13f353b79649a60c8f3c3e2021725558fb5ce52e 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -34,7 +34,7 @@
       when: get_csr.stdout_lines | length > 0
       changed_when: certificate_approve.stdout
 
-    - debug:
+    - debug:  # noqa unnamed-task
         msg: "{{ certificate_approve.stdout.split('\n') }}"
 
     when: kubelet_rotate_server_certificates | default(false)
@@ -60,7 +60,7 @@
     - busybox1
     - busybox2
 
-  - import_role:
+  - import_role:  # noqa unnamed-task
       name: cluster-dump
 
   - name: Check that all pods are running and ready
@@ -83,7 +83,7 @@
     register: pods
     no_log: true
 
-  - debug:
+  - debug:  # noqa unnamed-task
       msg: "{{ pods.stdout.split('\n') }}"
     failed_when: not run_pods_log is success
 
@@ -92,7 +92,7 @@
             jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
     changed_when: false
     register: hostnet_pods
-    ignore_errors: true
+    ignore_errors: true  # noqa ignore-errors
     no_log: true
 
   - name: Get running pods
@@ -108,7 +108,7 @@
     register: get_pods
     no_log: true
 
-  - debug:
+  - debug:  # noqa unnamed-task
       msg: "{{ get_pods.stdout.split('\n') }}"
 
   - name: Set networking facts
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index 18cf6daf10004df0649e52dda99eb295772c74f3..358a199838fb8b9f18cc23d9bf8c3199c8ae17e0 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -26,7 +26,7 @@
         bin_dir: "/usr/local/bin"
       when: not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]
 
-    - import_role:
+    - import_role:  # noqa unnamed-task
         name: cluster-dump
 
     - name: Wait for netchecker server
@@ -60,7 +60,7 @@
         - netchecker-agent-hostnet
       when: not nca_pod is success
 
-    - debug:
+    - debug:  # noqa unnamed-task
         var: nca_pod.stdout_lines
       failed_when: not nca_pod is success
       when: inventory_hostname == groups['kube_control_plane'][0]
@@ -80,7 +80,7 @@
       failed_when: false
       no_log: true
 
-    - debug:
+    - debug:  # noqa unnamed-task
         var: agents.content | from_json
       failed_when: not agents is success and not agents.content=='{}'
       run_once: true
@@ -106,7 +106,7 @@
       when:
         - agents.content != '{}'
 
-    - debug:
+    - debug:  # noqa unnamed-task
         var: ncs_pod
       run_once: true
       when: not result is success
@@ -131,7 +131,7 @@
         - calico-node
         - cilium
 
-    - debug:
+    - debug:  # noqa unnamed-task
         var: result.content | from_json
       failed_when: not result is success
       run_once: true
@@ -140,14 +140,14 @@
         - result.content
         - result.content[0] == '{'
 
-    - debug:
+    - debug:  # noqa unnamed-task
         var: result
       failed_when: not result is success
       run_once: true
       when:
         - not agents.content == '{}'
 
-    - debug:
+    - debug:  # noqa unnamed-task
         msg: "Cannot get reports from agents, consider as PASSING"
       run_once: true
       when:
diff --git a/tests/testcases/roles/cluster-dump/tasks/main.yml b/tests/testcases/roles/cluster-dump/tasks/main.yml
index 966a13c3ddc7586e9bb4e5b3c08b261f2090f8c3..96419e8a2836f5859dee08a2ec839c7d085a2b87 100644
--- a/tests/testcases/roles/cluster-dump/tasks/main.yml
+++ b/tests/testcases/roles/cluster-dump/tasks/main.yml
@@ -8,6 +8,7 @@
   archive:
     path: /tmp/cluster-dump
     dest: /tmp/cluster-dump.tgz
+    mode: 0644
   when: inventory_hostname in groups['kube_control_plane']
 
 - name: Fetch dump file