From 7516fe142f41222a649c325434614cb90b01bfab Mon Sep 17 00:00:00 2001
From: Cristian Calin <6627509+cristicalin@users.noreply.github.com>
Date: Mon, 12 Jul 2021 10:00:47 +0300
Subject: [PATCH] Move to Ansible 3.4.0 (#7672)

* Ansible: move to Ansible 3.4.0 which uses ansible-base 2.10.10

* Docs: add a note about ansible upgrade post 2.9.x

* CI: ensure ansible is removed before ansible 3.x is installed to avoid pip failures

* Ansible: use newer ansible-lint

* Fix issues found by ansible-lint 5.0.11 (a representative fix is sketched after this list)

* syntax issues
* risky-file-permissions
* var-naming
* role-name
* molecule tests
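
For reference, most of the risky-file-permissions fixes follow one pattern: any `template`/`copy`/`file` task that creates a path now sets an explicit `mode`. A representative before/after sketch (the src/dest lines mirror the azurerm contrib role; the task name is illustrative):

```yaml
# Before: ansible-lint 5.x flags this task as risky-file-permissions,
# because the resulting file mode depends on the remote umask.
- name: Generate inventory
  template:
    src: inventory.j2
    dest: "{{ playbook_dir }}/inventory"

# After: an explicit mode makes the created file's permissions deterministic.
- name: Generate inventory
  template:
    src: inventory.j2
    dest: "{{ playbook_dir }}/inventory"
    mode: 0644
```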

* Mitogen: use 0.3.0rc1 which adds support for ansible 2.10+

* Pin ansible-base to 2.10.11 to get package fix on RHEL8
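
For the CI changes, the essential part is ordering: the old monolithic ansible package must be uninstalled before pip pulls in the new ansible 3.x / ansible-base pair, otherwise pip can fail during the upgrade (per the note added to docs/ansible.md). A minimal sketch of the before_script fragment added to the GitLab CI configs:

```yaml
before_script:
  # Remove the pre-2.10 ansible package first; installing ansible 3.x
  # (which depends on ansible-base) on top of it makes pip fail.
  - python -m pip uninstall -y ansible
  - python -m pip install -r tests/requirements.txt
```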
---
 .ansible-lint                                 | 10 ++++
 .gitlab-ci.yml                                |  1 +
 .gitlab-ci/lint.yml                           |  1 +
 .gitlab-ci/vagrant.yml                        |  2 +
 ansible_version.yml                           | 12 +++++
 .../roles/generate-inventory/tasks/main.yml   |  1 +
 .../roles/generate-inventory_2/tasks/main.yml |  2 +
 .../roles/generate-templates/tasks/main.yml   |  2 +
 .../dind/roles/dind-cluster/tasks/main.yaml   |  2 +
 .../kvm-setup/roles/kvm-setup/tasks/user.yml  |  1 +
 .../roles/glusterfs/server/tasks/main.yml     |  1 +
 .../roles/glusterfs/server/tests/test.yml     |  5 --
 .../provision/tasks/bootstrap/deploy.yml      |  5 +-
 .../provision/tasks/bootstrap/topology.yml    |  1 +
 .../roles/provision/tasks/glusterfs.yml       | 10 +++-
 .../heketi/roles/provision/tasks/heketi.yml   |  1 +
 .../heketi/roles/provision/tasks/secret.yml   |  7 +--
 .../heketi/roles/provision/tasks/storage.yml  |  5 +-
 .../roles/provision/tasks/storageclass.yml    |  1 +
 .../heketi/roles/provision/tasks/topology.yml |  1 +
 .../roles/tear-down-disks/tasks/main.yml      |  4 +-
 .../heketi/roles/tear-down/tasks/main.yml     | 48 +++++++++----------
 docs/ansible.md                               | 25 ++++++++++
 mitogen.yml                                   |  4 +-
 recover-control-plane.yml                     |  6 +--
 requirements.txt                              |  3 +-
 roles/bastion-ssh-config/tasks/main.yml       |  1 +
 roles/bootstrap-os/tasks/bootstrap-centos.yml |  3 ++
 .../tasks/bootstrap-fedora-coreos.yml         |  4 +-
 roles/bootstrap-os/tasks/bootstrap-fedora.yml |  1 +
 .../bootstrap-os/tasks/bootstrap-opensuse.yml |  2 +-
 roles/bootstrap-os/tasks/bootstrap-redhat.yml |  9 ++--
 .../containerd/molecule/default/converge.yml  |  2 +-
 .../containerd/tasks/containerd_repo.yml      |  2 +
 .../containerd/tasks/main.yml                 |  4 +-
 .../cri-o/molecule/default/converge.yml       |  2 +-
 .../cri-o/tasks/crio_repo.yml                 |  2 +
 roles/container-engine/cri-o/tasks/main.yaml  |  8 +++-
 .../docker/molecule/default/converge.yml      |  2 +-
 roles/container-engine/docker/tasks/main.yml  |  4 +-
 .../container-engine/docker/tasks/systemd.yml |  9 +++-
 .../gvisor/molecule/default/converge.yml      |  4 +-
 .../gvisor/molecule/default/prepare.yml       |  6 +--
 .../molecule/default/converge.yml             |  4 +-
 .../kata-containers/tasks/main.yml            |  2 +
 roles/container-engine/meta/main.yml          |  1 +
 roles/download/tasks/download_container.yml   |  2 +-
 roles/download/tasks/download_file.yml        |  2 +
 roles/download/tasks/prep_download.yml        |  4 +-
 roles/download/tasks/prep_kubeadm_images.yml  |  1 +
 roles/etcd/tasks/configure.yml                | 10 ++--
 roles/etcd/tasks/gen_certs_script.yml         |  1 +
 roles/etcd/tasks/refresh_config.yml           |  2 +
 roles/etcd/tasks/upd_ca_trust.yml             |  1 +
 .../ansible/tasks/cleanup_dns.yml             |  2 +-
 .../cluster_roles/tasks/main.yml              |  6 ++-
 .../cluster_roles/tasks/oci.yml               |  1 +
 .../cephfs_provisioner/tasks/main.yml         |  4 +-
 .../rbd_provisioner/tasks/main.yml            |  5 +-
 .../cert_manager/tasks/main.yml               |  2 +-
 roles/kubernetes-apps/metallb/tasks/main.yml  |  2 +-
 .../network_plugin/kube-router/tasks/main.yml |  4 +-
 .../control-plane/tasks/kubeadm-backup.yml    |  4 +-
 .../control-plane/tasks/kubeadm-setup.yml     |  3 ++
 roles/kubernetes/control-plane/tasks/main.yml |  9 ++--
 roles/kubernetes/kubeadm/tasks/main.yml       |  1 +
 roles/kubernetes/node-label/tasks/main.yml    |  6 ++-
 roles/kubernetes/node/tasks/kubelet.yml       |  3 ++
 .../node/tasks/loadbalancer/haproxy.yml       |  1 +
 .../node/tasks/loadbalancer/nginx-proxy.yml   |  1 +
 roles/kubernetes/node/tasks/main.yml          |  5 +-
 .../preinstall/tasks/0010-swapoff.yml         |  2 +-
 .../tasks/0050-create_directories.yml         |  4 ++
 .../preinstall/tasks/0060-resolvconf.yml      |  1 +
 .../0062-networkmanager-unmanaged-devices.yml |  2 +
 .../tasks/0080-system-configurations.yml      |  2 +
 .../preinstall/tasks/0090-etchosts.yml        |  1 +
 .../preinstall/tasks/0100-dhclient-hooks.yml  |  1 +
 roles/kubernetes/preinstall/tasks/main.yml    |  3 +-
 roles/network_plugin/cilium/tasks/apply.yml   |  2 +-
 .../network_plugin/kube-router/tasks/main.yml |  2 +-
 .../recover_control_plane/etcd/tasks/main.yml |  6 +--
 .../etcd/tasks/recover_lost_quorum.yml        |  1 +
 roles/remove-node/post-remove/tasks/main.yml  |  4 +-
 .../remove-etcd-node/tasks/main.yml           |  2 +-
 roles/reset/tasks/main.yml                    | 10 ++--
 .../win_nodes/kubernetes_patch/tasks/main.yml |  6 ++-
 scripts/collect-info.yaml                     |  1 +
 .../roles/kubevirt-images/tasks/main.yml      |  2 +
 tests/cloud_playbooks/create-aws.yml          |  1 +
 tests/cloud_playbooks/create-do.yml           |  3 +-
 tests/cloud_playbooks/create-gce.yml          |  5 +-
 tests/cloud_playbooks/delete-gce.yml          |  4 +-
 .../roles/packet-ci/tasks/create-vms.yml      |  3 ++
 tests/cloud_playbooks/upload-logs-gcs.yml     |  4 +-
 tests/requirements.txt                        |  2 +-
 tests/scripts/testcases_prepare.sh            |  1 +
 tests/testcases/010_check-apiserver.yml       |  2 +-
 tests/testcases/015_check-nodes-ready.yml     |  4 +-
 tests/testcases/020_check-pods-running.yml    |  6 +--
 tests/testcases/030_check-network.yml         | 10 ++--
 tests/testcases/040_check-network-adv.yml     | 14 +++---
 .../roles/cluster-dump/tasks/main.yml         |  1 +
 103 files changed, 298 insertions(+), 129 deletions(-)
 delete mode 100644 contrib/network-storage/glusterfs/roles/glusterfs/server/tests/test.yml

diff --git a/.ansible-lint b/.ansible-lint
index e1909e966..048a89787 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -18,3 +18,13 @@ skip_list:
   # While it can be useful to have these metadata available, they are also available in the existing documentation.
   # (Disabled in May 2019)
   - '701'
+
+  # [role-name] "meta/main.yml" Role name role-name does not match ``^[a-z][a-z0-9_]+$`` pattern
+  # Meta roles in Kubespray don't need proper names
+  # (Disabled in June 2021)
+  - 'role-name'
+
+  # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
+  # In Kubespray we use variables that use camelCase to match their k8s counterparts
+  # (Disabled in June 2021)
+  - 'var-naming'
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 00278bb38..e6aae01ac 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -37,6 +37,7 @@ variables:
 before_script:
   - ./tests/scripts/rebase.sh
   - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
+  - python -m pip uninstall -y ansible
   - python -m pip install -r tests/requirements.txt
   - mkdir -p /.ssh
 
diff --git a/.gitlab-ci/lint.yml b/.gitlab-ci/lint.yml
index 8d4d41a7f..34b6e2207 100644
--- a/.gitlab-ci/lint.yml
+++ b/.gitlab-ci/lint.yml
@@ -53,6 +53,7 @@ tox-inventory-builder:
     - ./tests/scripts/rebase.sh
     - apt-get update && apt-get install -y python3-pip
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+    - python -m pip uninstall -y ansible
     - python -m pip install -r tests/requirements.txt
   script:
     - pip3 install tox
diff --git a/.gitlab-ci/vagrant.yml b/.gitlab-ci/vagrant.yml
index 445393973..92cf7b7db 100644
--- a/.gitlab-ci/vagrant.yml
+++ b/.gitlab-ci/vagrant.yml
@@ -11,6 +11,7 @@ molecule_tests:
     - tests/scripts/rebase.sh
     - apt-get update && apt-get install -y python3-pip
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+    - python -m pip uninstall -y ansible
     - python -m pip install -r tests/requirements.txt
     - ./tests/scripts/vagrant_clean.sh
   script:
@@ -31,6 +32,7 @@ molecule_tests:
   before_script:
     - apt-get update && apt-get install -y python3-pip
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+    - python -m pip uninstall -y ansible
     - python -m pip install -r tests/requirements.txt
     - ./tests/scripts/vagrant_clean.sh
   script:
diff --git a/ansible_version.yml b/ansible_version.yml
index da19e9698..cc2bb4134 100644
--- a/ansible_version.yml
+++ b/ansible_version.yml
@@ -4,6 +4,7 @@
   become: no
   vars:
     minimal_ansible_version: 2.9.0
+    minimal_ansible_version_2_10: 2.10.11
     maximal_ansible_version: 2.11.0
     ansible_connection: local
   tasks:
@@ -16,6 +17,17 @@
       tags:
         - check
 
+    - name: "Check Ansible version > {{ minimal_ansible_version_2_10 }} when using ansible 2.10"
+      assert:
+        msg: "When using Ansible 2.10, the minimum supported version is {{ minimal_ansible_version_2_10 }}"
+        that:
+          - ansible_version.string is version(minimal_ansible_version_2_10, ">=")
+          - ansible_version.string is version(maximal_ansible_version, "<")
+      when:
+        - ansible_version.string is version('2.10.0', ">=")
+      tags:
+        - check
+
     - name: "Check that python netaddr is installed"
       assert:
         msg: "Python netaddr is not present"
diff --git a/contrib/azurerm/roles/generate-inventory/tasks/main.yml b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
index ccc5e219a..6176a34e3 100644
--- a/contrib/azurerm/roles/generate-inventory/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
@@ -12,3 +12,4 @@
   template:
     src: inventory.j2
     dest: "{{ playbook_dir }}/inventory"
+    mode: 0644
diff --git a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
index 6ba7d5a87..4c80c9a54 100644
--- a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
@@ -22,8 +22,10 @@
   template:
     src: inventory.j2
     dest: "{{ playbook_dir }}/inventory"
+    mode: 0644
 
 - name: Generate Load Balancer variables
   template:
     src: loadbalancer_vars.j2
     dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
+    mode: 0644
diff --git a/contrib/azurerm/roles/generate-templates/tasks/main.yml b/contrib/azurerm/roles/generate-templates/tasks/main.yml
index 489250a98..294ee96fc 100644
--- a/contrib/azurerm/roles/generate-templates/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-templates/tasks/main.yml
@@ -8,11 +8,13 @@
     path: "{{ base_dir }}"
     state: directory
     recurse: true
+    mode: 0755
 
 - name: Store json files in base_dir
   template:
     src: "{{ item }}"
     dest: "{{ base_dir }}/{{ item }}"
+    mode: 0644
   with_items:
     - network.json
     - storage.json
diff --git a/contrib/dind/roles/dind-cluster/tasks/main.yaml b/contrib/dind/roles/dind-cluster/tasks/main.yaml
index 5b7c77e49..247a0a8e9 100644
--- a/contrib/dind/roles/dind-cluster/tasks/main.yaml
+++ b/contrib/dind/roles/dind-cluster/tasks/main.yaml
@@ -35,6 +35,7 @@
       path-exclude=/usr/share/doc/*
       path-include=/usr/share/doc/*/copyright
     dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
+    mode: 0644
   when:
     - ansible_os_family == 'Debian'
 
@@ -63,6 +64,7 @@
   copy:
     content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
     dest: "/etc/sudoers.d/{{ distro_user }}"
+    mode: 0640
 
 - name: Add my pubkey to "{{ distro_user }}" user authorized keys
   authorized_key:
diff --git a/contrib/kvm-setup/roles/kvm-setup/tasks/user.yml b/contrib/kvm-setup/roles/kvm-setup/tasks/user.yml
index f259c7f07..c2d312302 100644
--- a/contrib/kvm-setup/roles/kvm-setup/tasks/user.yml
+++ b/contrib/kvm-setup/roles/kvm-setup/tasks/user.yml
@@ -11,6 +11,7 @@
     state: directory
     owner: "{{ k8s_deployment_user }}"
     group: "{{ k8s_deployment_user }}"
+    mode: 0700
 
 - name: Configure sudo for deployment user
   copy:
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
index 11269a6e7..0a5859850 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
@@ -82,6 +82,7 @@
   template:
     dest: "{{ gluster_mount_dir }}/.test-file.txt"
     src: test-file.txt
+    mode: 0644
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
 
 - name: Unmount glusterfs
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tests/test.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tests/test.yml
deleted file mode 100644
index 3646ff420..000000000
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: all
-
-  roles:
-    - role_under_test
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
index 93b473295..8d03ffc2f 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
@@ -1,7 +1,10 @@
 ---
 - name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
   become: true
-  template: { src: "heketi-bootstrap.json.j2", dest: "{{ kube_config_dir }}/heketi-bootstrap.json" }
+  template:
+    src: "heketi-bootstrap.json.j2"
+    dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
+    mode: 0640
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
   kube:
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
index 07e86237c..4c6dc130c 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
@@ -10,6 +10,7 @@
   template:
     src: "topology.json.j2"
     dest: "{{ kube_config_dir }}/topology.json"
+    mode: 0644
 - name: "Copy topology configuration into container."
   changed_when: false
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
index 5f00e28aa..3409cf957 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
@@ -1,6 +1,9 @@
 ---
 - name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
-  template: { src: "glusterfs-daemonset.json.j2", dest: "{{ kube_config_dir }}/glusterfs-daemonset.json" }
+  template:
+    src: "glusterfs-daemonset.json.j2"
+    dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
+    mode: 0644
   become: true
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
@@ -27,7 +30,10 @@
   delay: 5
 
 - name: "Kubernetes Apps | Lay Down Heketi Service Account"
-  template: { src: "heketi-service-account.json.j2", dest: "{{ kube_config_dir }}/heketi-service-account.json" }
+  template:
+    src: "heketi-service-account.json.j2"
+    dest: "{{ kube_config_dir }}/heketi-service-account.json"
+    mode: 0644
   become: true
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Heketi Service Account"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
index 7b6d37d24..9a6ce55b2 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
@@ -4,6 +4,7 @@
   template:
     src: "heketi-deployment.json.j2"
     dest: "{{ kube_config_dir }}/heketi-deployment.json"
+    mode: 0644
   register: "rendering"
 
 - name: "Kubernetes Apps | Install and configure Heketi"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
index 3615f7c6d..3249c87b4 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
@@ -5,7 +5,7 @@
   changed_when: false
 
 - name: "Kubernetes Apps | Deploy cluster role binding."
-  when: "clusterrolebinding_state.stdout == \"\""
+  when: "clusterrolebinding_state.stdout | length > 0"
   command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
 
 - name: Get clusterrolebindings again
@@ -15,7 +15,7 @@
 
 - name: Make sure that clusterrolebindings are present now
   assert:
-    that: "clusterrolebinding_state.stdout != \"\""
+    that: "clusterrolebinding_state.stdout | length > 0"
     msg: "Cluster role binding is not present."
 
 - name: Get the heketi-config-secret secret
@@ -28,9 +28,10 @@
   template:
     src: "heketi.json.j2"
     dest: "{{ kube_config_dir }}/heketi.json"
+    mode: 0644
 
 - name: "Deploy Heketi config secret"
-  when: "secret_state.stdout == \"\""
+  when: "secret_state.stdout | length > 0"
   command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
 
 - name: Get the heketi-config-secret secret again
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storage.yml b/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
index 210930804..055e179a3 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
@@ -2,7 +2,10 @@
 - name: "Kubernetes Apps | Lay Down Heketi Storage"
   become: true
   vars: { nodes: "{{ groups['heketi-node'] }}" }
-  template: { src: "heketi-storage.json.j2", dest: "{{ kube_config_dir }}/heketi-storage.json" }
+  template:
+    src: "heketi-storage.json.j2"
+    dest: "{{ kube_config_dir }}/heketi-storage.json"
+    mode: 0644
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Heketi Storage"
   kube:
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
index 5bf3e3c4d..3380a612f 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
@@ -16,6 +16,7 @@
   template:
     src: "storageclass.yml.j2"
     dest: "{{ kube_config_dir }}/storageclass.yml"
+    mode: 0644
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Storace Class"
   kube:
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
index 4430a5592..f20af1fb9 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
@@ -10,6 +10,7 @@
   template:
     src: "topology.json.j2"
     dest: "{{ kube_config_dir }}/topology.json"
+    mode: 0644
 - name: "Copy topology configuration into container."  # noqa 503
   when: "rendering.changed"
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
diff --git a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
index 43a7b4916..ae98bd8c2 100644
--- a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
+++ b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
@@ -19,7 +19,7 @@
   become: true
   shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
   register: "volume_groups"
-  ignore_errors: true
+  ignore_errors: true   # noqa ignore-errors
   changed_when: false
 
 - name: "Remove volume groups."  # noqa 301
@@ -35,7 +35,7 @@
     PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH / CentOS conservative path management
   become: true
   command: "pvremove {{ disk_volume_device_1 }} --yes"
-  ignore_errors: true
+  ignore_errors: true   # noqa ignore-errors
 
 - name: "Remove lvm utils (RedHat)"
   become: true
diff --git a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
index baf25fcb7..608b25de6 100644
--- a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
+++ b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
@@ -1,51 +1,51 @@
 ---
-- name: "Remove storage class."  # noqa 301
+- name: Remove storage class.  # noqa 301
   command: "{{ bin_dir }}/kubectl delete storageclass gluster"
-  ignore_errors: true
-- name: "Tear down heketi."  # noqa 301
+  ignore_errors: true  # noqa ignore-errors
+- name: Tear down heketi.  # noqa 301
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
-  ignore_errors: true
-- name: "Tear down heketi."  # noqa 301
+  ignore_errors: true  # noqa ignore-errors
+- name: Tear down heketi.  # noqa 301
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
-  ignore_errors: true
-- name: "Tear down bootstrap."
+  ignore_errors: true  # noqa ignore-errors
+- name: Tear down bootstrap.
   include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
-- name: "Ensure there is nothing left over."  # noqa 301
+- name: Ensure there is nothing left over.  # noqa 301
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: "Ensure there is nothing left over."  # noqa 301
+- name: Ensure there is nothing left over.  # noqa 301
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: "Tear down glusterfs."  # noqa 301
+- name: Tear down glusterfs.  # noqa 301
   command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
-  ignore_errors: true
-- name: "Remove heketi storage service."  # noqa 301
+  ignore_errors: true  # noqa ignore-errors
+- name: Remove heketi storage service.  # noqa 301
   command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
-  ignore_errors: true
-- name: "Remove heketi gluster role binding"  # noqa 301
+  ignore_errors: true  # noqa ignore-errors
+- name: Remove heketi gluster role binding  # noqa 301
   command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
-  ignore_errors: true
-- name: "Remove heketi config secret"  # noqa 301
+  ignore_errors: true  # noqa ignore-errors
+- name: Remove heketi config secret  # noqa 301
   command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
-  ignore_errors: true
-- name: "Remove heketi db backup"  # noqa 301
+  ignore_errors: true  # noqa ignore-errors
+- name: Remove heketi db backup  # noqa 301
   command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
-  ignore_errors: true
-- name: "Remove heketi service account"  # noqa 301
+  ignore_errors: true  # noqa ignore-errors
+- name: Remove heketi service account  # noqa 301
   command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
-  ignore_errors: true
-- name: "Get secrets"
+  ignore_errors: true  # noqa ignore-errors
+- name: Get secrets
   command: "{{ bin_dir }}/kubectl get secrets --output=\"json\""
   register: "secrets"
   changed_when: false
-- name: "Remove heketi storage secret"
+- name: Remove heketi storage secret
   vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
   command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
   when: "storage_query is defined"
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
diff --git a/docs/ansible.md b/docs/ansible.md
index d8ca5a657..30f0e13f3 100644
--- a/docs/ansible.md
+++ b/docs/ansible.md
@@ -187,3 +187,28 @@ For more information about Ansible and bastion hosts, read
 ## Mitogen
 
 You can use [mitogen](mitogen.md) to speed up kubespray.
+
+## Beyond ansible 2.9
+
+The Ansible project has decided, in order to ease its maintenance burden, to split into
+two projects, which are now joined under the Ansible umbrella.
+
+Ansible-base (the 2.10.x branch) contains just the ansible language implementation, while
+the ansible modules that were previously bundled into a single repository are now part of
+the ansible 3.x package. Please see [this blog post](https://blog.while-true-do.io/ansible-release-3-0-0/),
+which explains in detail the need and the evolution plan.
+
+**Note:** this change means that ansible virtual envs cannot be upgraded with `pip install -U`.
+You first need to uninstall your old ansible (pre-2.10) version and install the new one.
+
+```ShellSession
+pip uninstall ansible
+cd kubespray/
+pip install -U .
+```
+
+**Note:** some changes needed to support ansible 2.10+ are not backwards compatible with 2.9.
+Kubespray needs to evolve and keep pace with upstream ansible, and will eventually be forced
+to drop 2.9 support. Kubespray CIs use only the ansible version specified in `requirements.txt`,
+and while `ansible_version.yml` may allow older versions to be used, these are not
+exercised in the CI and compatibility is not guaranteed.
diff --git a/mitogen.yml b/mitogen.yml
index 7e8686a44..b39075f13 100644
--- a/mitogen.yml
+++ b/mitogen.yml
@@ -5,7 +5,7 @@
 - hosts: localhost
   strategy: linear
   vars:
-    mitogen_version: 0.2.9
+    mitogen_version: 0.3.0rc1
     mitogen_url: https://github.com/dw/mitogen/archive/v{{ mitogen_version }}.tar.gz
     ansible_connection: local
   tasks:
@@ -13,6 +13,7 @@
       file:
         path: "{{ item }}"
         state: directory
+        mode: 0755
       become: false
       loop:
         - "{{ playbook_dir }}/plugins/mitogen"
@@ -40,3 +41,4 @@
         section: defaults
         option: strategy
         value: mitogen_linear
+        mode: 0644
diff --git a/recover-control-plane.yml b/recover-control-plane.yml
index 03d573d3b..4d08f3a90 100644
--- a/recover-control-plane.yml
+++ b/recover-control-plane.yml
@@ -12,13 +12,13 @@
     - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
 
-- hosts: "{{ groups['etcd'] | first }}"
+- hosts: etcd[0]
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
     - { role: recover_control_plane/etcd }
 
-- hosts: "{{ groups['kube_control_plane'] | first }}"
+- hosts: kube_control_plane[0]
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
@@ -26,7 +26,7 @@
 
 - include: cluster.yml
 
-- hosts: "{{ groups['kube_control_plane'] }}"
+- hosts: kube_control_plane
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
diff --git a/requirements.txt b/requirements.txt
index e38a00257..09669bdb8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,5 @@
-ansible==2.9.20
+ansible==3.4.0
+ansible-base==2.10.11
 cryptography==2.8
 jinja2==2.11.3
 netaddr==0.7.19
diff --git a/roles/bastion-ssh-config/tasks/main.yml b/roles/bastion-ssh-config/tasks/main.yml
index c6158dbce..d638e539e 100644
--- a/roles/bastion-ssh-config/tasks/main.yml
+++ b/roles/bastion-ssh-config/tasks/main.yml
@@ -19,3 +19,4 @@
   template:
     src: ssh-bastion.conf
     dest: "{{ playbook_dir }}/ssh-bastion.conf"
+    mode: 0640
diff --git a/roles/bootstrap-os/tasks/bootstrap-centos.yml b/roles/bootstrap-os/tasks/bootstrap-centos.yml
index f6a57b9d2..a0f4f5882 100644
--- a/roles/bootstrap-os/tasks/bootstrap-centos.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-centos.yml
@@ -12,6 +12,7 @@
     value: "{{ http_proxy | default(omit) }}"
     state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
     no_extra_spaces: true
+    mode: 0644
   become: true
   when: not skip_http_proxy_on_os_packages
 
@@ -32,6 +33,7 @@
     section: "{{ item }}"
     option: enabled
     value: "1"
+    mode: 0644
   with_items:
     - ol7_latest
     - ol7_addons
@@ -56,6 +58,7 @@
     section: "ol{{ ansible_distribution_major_version }}_addons"
     option: "{{ item.option }}"
     value: "{{ item.value }}"
+    mode: 0644
   with_items:
     - { option: "name", value: "ol{{ ansible_distribution_major_version }}_addons" }
     - { option: "enabled", value: "1" }
diff --git a/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml b/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml
index e999d0506..1a222f664 100644
--- a/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml
@@ -11,7 +11,7 @@
 - name: Remove podman network cni
   raw: "podman network rm podman"
   become: true
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when: need_bootstrap.rc != 0
 
 - name: Clean up possible pending packages on fedora coreos
@@ -43,7 +43,7 @@
 - name: Reboot immediately for updated ostree, please run playbook again if failed first time.
   raw: "nohup bash -c 'sleep 5s && shutdown -r now'"
   become: true
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   ignore_unreachable: yes
   when: need_bootstrap.rc != 0
 
diff --git a/roles/bootstrap-os/tasks/bootstrap-fedora.yml b/roles/bootstrap-os/tasks/bootstrap-fedora.yml
index cfdd76e3a..161317315 100644
--- a/roles/bootstrap-os/tasks/bootstrap-fedora.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-fedora.yml
@@ -17,6 +17,7 @@
     value: "{{ http_proxy | default(omit) }}"
     state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
     no_extra_spaces: true
+    mode: 0644
   become: true
   when: not skip_http_proxy_on_os_packages
 
diff --git a/roles/bootstrap-os/tasks/bootstrap-opensuse.yml b/roles/bootstrap-os/tasks/bootstrap-opensuse.yml
index 5b2b6ab94..b30581120 100644
--- a/roles/bootstrap-os/tasks/bootstrap-opensuse.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-opensuse.yml
@@ -10,7 +10,7 @@
   register: stat_result
 
 - name: Create the /etc/sysconfig/proxy empty file
-  file:
+  file:  # noqa risky-file-permissions
     path: /etc/sysconfig/proxy
     state: touch
   when:
diff --git a/roles/bootstrap-os/tasks/bootstrap-redhat.yml b/roles/bootstrap-os/tasks/bootstrap-redhat.yml
index 5a2bbf553..c6bf43ba4 100644
--- a/roles/bootstrap-os/tasks/bootstrap-redhat.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-redhat.yml
@@ -12,6 +12,7 @@
     value: "{{ http_proxy | default(omit) }}"
     state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
     no_extra_spaces: true
+    mode: 0644
   become: true
   when: not skip_http_proxy_on_os_packages
 
@@ -19,7 +20,7 @@
   command: /sbin/subscription-manager status
   register: rh_subscription_status
   changed_when: "rh_subscription_status != 0"
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   become: true
 
 - name: RHEL subscription Organization ID/Activation Key registration
@@ -35,12 +36,13 @@
       service_level_agreement: "{{ rh_subscription_sla }}"
       sync: true
   notify: RHEL auto-attach subscription
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   become: true
   when:
     - rh_subscription_org_id is defined
     - rh_subscription_status.changed
 
+# This task has no_log set to prevent logging security-sensitive information such as subscription passwords
 - name: RHEL subscription Username/Password registration
   redhat_subscription:
     state: present
@@ -54,8 +56,9 @@
       service_level_agreement: "{{ rh_subscription_sla }}"
       sync: true
   notify: RHEL auto-attach subscription
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   become: true
+  no_log: true
   when:
     - rh_subscription_username is defined
     - rh_subscription_status.changed
diff --git a/roles/container-engine/containerd/molecule/default/converge.yml b/roles/container-engine/containerd/molecule/default/converge.yml
index b70dabf89..26ff82a9e 100644
--- a/roles/container-engine/containerd/molecule/default/converge.yml
+++ b/roles/container-engine/containerd/molecule/default/converge.yml
@@ -4,4 +4,4 @@
   become: true
   roles:
     - role: kubespray-defaults
-    - role: containerd
+    - role: container-engine/containerd
diff --git a/roles/container-engine/containerd/tasks/containerd_repo.yml b/roles/container-engine/containerd/tasks/containerd_repo.yml
index d62468f0c..b26bc84c7 100644
--- a/roles/container-engine/containerd/tasks/containerd_repo.yml
+++ b/roles/container-engine/containerd/tasks/containerd_repo.yml
@@ -23,12 +23,14 @@
   template:
     src: "fedora_containerd.repo.j2"
     dest: "{{ yum_repo_dir }}/containerd.repo"
+    mode: 0644
   when: ansible_distribution == "Fedora"
 
 - name: Configure containerd repository on RedHat/OracleLinux/CentOS/AlmaLinux
   template:
     src: "rh_containerd.repo.j2"
     dest: "{{ yum_repo_dir }}/containerd.repo"
+    mode: 0644
   when:
     - ansible_os_family == "RedHat"
     - ansible_distribution not in ["Fedora", "Amazon"]
diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml
index 5becf1f3d..e4ba9983b 100644
--- a/roles/container-engine/containerd/tasks/main.yml
+++ b/roles/container-engine/containerd/tasks/main.yml
@@ -58,11 +58,13 @@
   file:
     path: /etc/systemd/system/containerd.service.d
     state: directory
+    mode: 0755
 
 - name: Write containerd proxy drop-in
   template:
     src: http-proxy.conf.j2
     dest: /etc/systemd/system/containerd.service.d/http-proxy.conf
+    mode: 0644
   notify: restart containerd
   when: http_proxy is defined or https_proxy is defined
 
@@ -116,7 +118,7 @@
     - not is_ostree
     - containerd_package_info.pkgs|length > 0
 
-- include_role:
+- include_role:  # noqa unnamed-task
     name: container-engine/crictl
 
 # you can sometimes end up in a state where everything is installed
diff --git a/roles/container-engine/cri-o/molecule/default/converge.yml b/roles/container-engine/cri-o/molecule/default/converge.yml
index fdb8fb600..5235ae330 100644
--- a/roles/container-engine/cri-o/molecule/default/converge.yml
+++ b/roles/container-engine/cri-o/molecule/default/converge.yml
@@ -4,4 +4,4 @@
   become: true
   roles:
     - role: kubespray-defaults
-    - role: cri-o
+    - role: container-engine/cri-o
diff --git a/roles/container-engine/cri-o/tasks/crio_repo.yml b/roles/container-engine/cri-o/tasks/crio_repo.yml
index 23447ee43..b0ca20725 100644
--- a/roles/container-engine/cri-o/tasks/crio_repo.yml
+++ b/roles/container-engine/cri-o/tasks/crio_repo.yml
@@ -53,6 +53,7 @@
     option: enabled
     value: "0"
     backup: yes
+    mode: 0644
   when:
     - ansible_distribution in ["Amazon"]
     - amzn2_extras_file_stat.stat.exists
@@ -119,6 +120,7 @@
     section: "{{ item.section }}"
     option: enabled
     value: 1
+    mode: 0644
   become: true
   when: is_ostree
   loop:
diff --git a/roles/container-engine/cri-o/tasks/main.yaml b/roles/container-engine/cri-o/tasks/main.yaml
index c152a5232..3fda1aeb4 100644
--- a/roles/container-engine/cri-o/tasks/main.yaml
+++ b/roles/container-engine/cri-o/tasks/main.yaml
@@ -46,7 +46,7 @@
   import_tasks: "crio_repo.yml"
   when: crio_add_repos
 
-- include_role:
+- include_role:  # noqa unnamed-task
     name: container-engine/crictl
 
 - name: Build a list of crio runtimes with Katacontainers runtimes
@@ -69,11 +69,13 @@
   file:
     path: "{{ item }}"
     state: directory
+    mode: 0755
 
 - name: Install cri-o config
   template:
     src: crio.conf.j2
     dest: /etc/crio/crio.conf
+    mode: 0644
   register: config_install
 
 - name: Add skopeo pkg to install
@@ -129,6 +131,7 @@
   copy:
     src: mounts.conf
     dest: /etc/containers/mounts.conf
+    mode: 0644
   when:
     - ansible_os_family == 'RedHat'
   notify: restart crio
@@ -147,6 +150,7 @@
     section: storage.options.overlay
     option: mountopt
     value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}'
+    mode: 0644
 
 - name: Create directory registries configs
   file:
@@ -159,6 +163,7 @@
   template:
     src: registry-mirror.conf.j2
     dest: "/etc/containers/registries.conf.d/{{ item.prefix }}.conf"
+    mode: 0644
   loop: "{{ crio_registries_mirrors }}"
   notify: restart crio
 
@@ -166,6 +171,7 @@
   template:
     src: http-proxy.conf.j2
     dest: /etc/systemd/system/crio.service.d/http-proxy.conf
+    mode: 0644
   notify: restart crio
   when: http_proxy is defined or https_proxy is defined
 
diff --git a/roles/container-engine/docker/molecule/default/converge.yml b/roles/container-engine/docker/molecule/default/converge.yml
index 68c44d26d..afe7a8eb3 100644
--- a/roles/container-engine/docker/molecule/default/converge.yml
+++ b/roles/container-engine/docker/molecule/default/converge.yml
@@ -4,4 +4,4 @@
   become: true
   roles:
     - role: kubespray-defaults
-    - role: docker
+    - role: container-engine/docker
diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml
index ee779279e..39df9e886 100644
--- a/roles/container-engine/docker/tasks/main.yml
+++ b/roles/container-engine/docker/tasks/main.yml
@@ -80,12 +80,14 @@
   template:
     src: "fedora_docker.repo.j2"
     dest: "{{ yum_repo_dir }}/docker.repo"
+    mode: 0644
   when: ansible_distribution == "Fedora" and not is_ostree
 
 - name: Configure docker repository on RedHat/CentOS/Oracle/AlmaLinux Linux
   template:
     src: "rh_docker.repo.j2"
     dest: "{{ yum_repo_dir }}/docker-ce.repo"
+    mode: 0644
   when:
     - ansible_os_family == "RedHat"
     - ansible_distribution != "Fedora"
@@ -145,7 +147,7 @@
         state: started
       when: docker_task_result is not changed
   rescue:
-    - debug:
+    - debug:  # noqa unnamed-task
         msg: "Docker start failed. Try to remove our config"
     - name: remove kubespray generated config
       file:
diff --git a/roles/container-engine/docker/tasks/systemd.yml b/roles/container-engine/docker/tasks/systemd.yml
index b98ae2353..a57061c15 100644
--- a/roles/container-engine/docker/tasks/systemd.yml
+++ b/roles/container-engine/docker/tasks/systemd.yml
@@ -3,11 +3,13 @@
   file:
     path: /etc/systemd/system/docker.service.d
     state: directory
+    mode: 0755
 
 - name: Write docker proxy drop-in
   template:
     src: http-proxy.conf.j2
     dest: /etc/systemd/system/docker.service.d/http-proxy.conf
+    mode: 0644
   notify: restart docker
   when: http_proxy is defined or https_proxy is defined
 
@@ -25,6 +27,7 @@
   template:
     src: docker.service.j2
     dest: /etc/systemd/system/docker.service
+    mode: 0644
   register: docker_service_file
   notify: restart docker
   when:
@@ -35,12 +38,14 @@
   template:
     src: docker-options.conf.j2
     dest: "/etc/systemd/system/docker.service.d/docker-options.conf"
+    mode: 0644
   notify: restart docker
 
 - name: Write docker dns systemd drop-in
   template:
     src: docker-dns.conf.j2
     dest: "/etc/systemd/system/docker.service.d/docker-dns.conf"
+    mode: 0644
   notify: restart docker
   when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
 
@@ -55,7 +60,9 @@
   template:
     src: docker-orphan-cleanup.conf.j2
     dest: "/etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf"
+    mode: 0644
   notify: restart docker
   when: docker_orphan_clean_up | bool
 
-- meta: flush_handlers
+- name: Flush handlers
+  meta: flush_handlers
diff --git a/roles/container-engine/gvisor/molecule/default/converge.yml b/roles/container-engine/gvisor/molecule/default/converge.yml
index 8bf5478e8..b14d078a1 100644
--- a/roles/container-engine/gvisor/molecule/default/converge.yml
+++ b/roles/container-engine/gvisor/molecule/default/converge.yml
@@ -7,5 +7,5 @@
     container_manager: containerd
   roles:
     - role: kubespray-defaults
-    - role: containerd
-    - role: gvisor
+    - role: container-engine/containerd
+    - role: container-engine/gvisor
diff --git a/roles/container-engine/gvisor/molecule/default/prepare.yml b/roles/container-engine/gvisor/molecule/default/prepare.yml
index 084824830..e5a7e773c 100644
--- a/roles/container-engine/gvisor/molecule/default/prepare.yml
+++ b/roles/container-engine/gvisor/molecule/default/prepare.yml
@@ -5,7 +5,7 @@
   roles:
     - role: kubespray-defaults
     - role: bootstrap-os
-    - role: ../adduser
+    - role: adduser
       user: "{{ addusers.kube }}"
   tasks:
     - include_tasks: "../../../../download/tasks/download_file.yml"
@@ -20,8 +20,8 @@
     kube_network_plugin: cni
   roles:
     - role: kubespray-defaults
-    - role: ../network_plugin/cni
-    - role: crictl
+    - role: network_plugin/cni
+    - role: container-engine/crictl
   tasks:
     - name: Copy test container files
       copy:
diff --git a/roles/container-engine/kata-containers/molecule/default/converge.yml b/roles/container-engine/kata-containers/molecule/default/converge.yml
index 995705705..3456ee6f8 100644
--- a/roles/container-engine/kata-containers/molecule/default/converge.yml
+++ b/roles/container-engine/kata-containers/molecule/default/converge.yml
@@ -6,5 +6,5 @@
     kata_containers_enabled: true
   roles:
     - role: kubespray-defaults
-    - role: containerd
-    - role: kata-containers
+    - role: container-engine/containerd
+    - role: container-engine/kata-containers
diff --git a/roles/container-engine/kata-containers/tasks/main.yml b/roles/container-engine/kata-containers/tasks/main.yml
index 3e4bce907..8d99e5255 100644
--- a/roles/container-engine/kata-containers/tasks/main.yml
+++ b/roles/container-engine/kata-containers/tasks/main.yml
@@ -15,11 +15,13 @@
   file:
     path: "{{ kata_containers_config_dir }}"
     state: directory
+    mode: 0755
 
 - name: kata-containers | Set configuration
   template:
     src: "{{ item }}.j2"
     dest: "{{ kata_containers_config_dir }}/{{ item }}"
+    mode: 0644
   with_items:
     - configuration-qemu.toml
 
diff --git a/roles/container-engine/meta/main.yml b/roles/container-engine/meta/main.yml
index 2f6bff147..8bd98bd02 100644
--- a/roles/container-engine/meta/main.yml
+++ b/roles/container-engine/meta/main.yml
@@ -1,3 +1,4 @@
+# noqa role-name - this is a meta role that doesn't need a name
 ---
 dependencies:
   - role: container-engine/kata-containers
diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml
index 21b3cbd22..79e92632e 100644
--- a/roles/download/tasks/download_container.yml
+++ b/roles/download/tasks/download_container.yml
@@ -18,7 +18,7 @@
       when:
         - not download_always_pull
 
-    - debug:
+    - debug:  # noqa unnamed-task
         msg: "Pull {{ image_reponame }} required is: {{ pull_required }}"
 
     - name: download_container | Determine if image is in cache
diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml
index a6725fdcf..f7dcfda10 100644
--- a/roles/download/tasks/download_file.yml
+++ b/roles/download/tasks/download_file.yml
@@ -48,6 +48,7 @@
     - not download_localhost
 
   # This must always be called, to check if the checksum matches. On no-match the file is re-downloaded.
+  # This task avoids logging its parameters so as not to leak environment passwords into the log
   - name: download_file | Download item
     get_url:
       url: "{{ download.url }}"
@@ -67,6 +68,7 @@
     retries: 4
     delay: "{{ retry_stagger | default(5) }}"
     environment: "{{ proxy_env }}"
+    no_log: true
 
   - name: download_file | Copy file back to ansible host file cache
     synchronize:
diff --git a/roles/download/tasks/prep_download.yml b/roles/download/tasks/prep_download.yml
index 475040a50..e57266bd8 100644
--- a/roles/download/tasks/prep_download.yml
+++ b/roles/download/tasks/prep_download.yml
@@ -38,7 +38,7 @@
   run_once: true
   register: test_become
   changed_when: false
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   become: true
   when:
     - download_localhost
@@ -53,7 +53,7 @@
   run_once: true
   register: test_docker
   changed_when: false
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   become: false
   when:
     - download_localhost
diff --git a/roles/download/tasks/prep_kubeadm_images.yml b/roles/download/tasks/prep_kubeadm_images.yml
index c520a9416..aa21849e0 100644
--- a/roles/download/tasks/prep_kubeadm_images.yml
+++ b/roles/download/tasks/prep_kubeadm_images.yml
@@ -18,6 +18,7 @@
   template:
     src: "kubeadm-images.yaml.j2"
     dest: "{{ kube_config_dir }}/kubeadm-images.yaml"
+    mode: 0644
   when:
     - not skip_kubeadm_images|default(false)
 
diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml
index 331dec72f..7534e4176 100644
--- a/roles/etcd/tasks/configure.yml
+++ b/roles/etcd/tasks/configure.yml
@@ -45,6 +45,7 @@
     src: "etcd-{{ etcd_deployment_type }}.service.j2"
     dest: /etc/systemd/system/etcd.service
     backup: yes
+    mode: 0644
   when: is_etcd_master and etcd_cluster_setup
 
 - name: Configure | Copy etcd-events.service systemd file
@@ -52,6 +53,7 @@
     src: "etcd-events-{{ etcd_deployment_type }}.service.j2"
     dest: /etc/systemd/system/etcd-events.service
     backup: yes
+    mode: 0644
   when: is_etcd_master and etcd_events_cluster_setup
 
 - name: Configure | reload systemd
@@ -65,7 +67,7 @@
     name: etcd
     state: started
     enabled: yes
-  ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}"
+  ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}"  # noqa ignore-errors
   when: is_etcd_master and etcd_cluster_setup
 
 # when scaling new etcd will fail to start
@@ -74,7 +76,7 @@
     name: etcd-events
     state: started
     enabled: yes
-  ignore_errors: "{{ etcd_events_cluster_is_healthy.rc == 0 }}"
+  ignore_errors: "{{ etcd_events_cluster_is_healthy.rc != 0 }}"  # noqa ignore-errors
   when: is_etcd_master and etcd_events_cluster_setup
 
 - name: Configure | Wait for etcd cluster to be healthy
@@ -126,7 +128,7 @@
 - name: Configure | Check if member is in etcd cluster
   shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}"
   register: etcd_member_in_cluster
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   changed_when: false
   check_mode: no
   when: is_etcd_master and etcd_cluster_setup
@@ -142,7 +144,7 @@
 - name: Configure | Check if member is in etcd-events cluster
   shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}"
   register: etcd_events_member_in_cluster
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   changed_when: false
   check_mode: no
   when: is_etcd_master and etcd_events_cluster_setup
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index 2b4e9297c..3825e3c62 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -21,6 +21,7 @@
   template:
     src: "openssl.conf.j2"
     dest: "{{ etcd_config_dir }}/openssl.conf"
+    mode: 0640
   run_once: yes
   delegate_to: "{{ groups['etcd'][0] }}"
   when:
diff --git a/roles/etcd/tasks/refresh_config.yml b/roles/etcd/tasks/refresh_config.yml
index 21c308fb0..57010fee1 100644
--- a/roles/etcd/tasks/refresh_config.yml
+++ b/roles/etcd/tasks/refresh_config.yml
@@ -3,6 +3,7 @@
   template:
     src: etcd.env.j2
     dest: /etc/etcd.env
+    mode: 0640
   notify: restart etcd
   when: is_etcd_master and etcd_cluster_setup
 
@@ -10,5 +11,6 @@
   template:
     src: etcd-events.env.j2
     dest: /etc/etcd-events.env
+    mode: 0640
   notify: restart etcd-events
   when: is_etcd_master and etcd_events_cluster_setup
diff --git a/roles/etcd/tasks/upd_ca_trust.yml b/roles/etcd/tasks/upd_ca_trust.yml
index 0d1ba1231..cfa4965b3 100644
--- a/roles/etcd/tasks/upd_ca_trust.yml
+++ b/roles/etcd/tasks/upd_ca_trust.yml
@@ -21,6 +21,7 @@
     src: "{{ etcd_cert_dir }}/ca.pem"
     dest: "{{ ca_cert_path }}"
     remote_src: true
+    mode: 0640
   register: etcd_ca_cert
 
 - name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar)  # noqa 503
diff --git a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
index 2f774cfcd..538fc22fc 100644
--- a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
+++ b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
@@ -3,7 +3,7 @@
   shell: "{{ bin_dir }}/kubectl get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
   register: createdby_annotation
   changed_when: false
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   when:
     - dns_mode in ['coredns', 'coredns_dual']
     - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index 2f5f110af..c477c2a41 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -30,6 +30,7 @@
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ kube_config_dir }}/{{ item.file }}"
+    mode: 0640
   register: psp_manifests
   with_items:
     - {file: psp.yml, type: psp, name: psp}
@@ -61,6 +62,7 @@
   template:
     src: "node-crb.yml.j2"
     dest: "{{ kube_config_dir }}/node-crb.yml"
+    mode: 0640
   register: node_crb_manifest
   when:
     - rbac_enabled
@@ -86,6 +88,7 @@
   template:
     src: "node-webhook-cr.yml.j2"
     dest: "{{ kube_config_dir }}/node-webhook-cr.yml"
+    mode: 0640
   register: node_webhook_cr_manifest
   when:
     - rbac_enabled
@@ -111,6 +114,7 @@
   template:
     src: "node-webhook-crb.yml.j2"
     dest: "{{ kube_config_dir }}/node-webhook-crb.yml"
+    mode: 0640
   register: node_webhook_crb_manifest
   when:
     - rbac_enabled
@@ -139,7 +143,7 @@
     - cloud_provider == 'oci'
 
 - name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
-  copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml
+  copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml mode=0640
   when: inventory_hostname == groups['kube_control_plane']|last
 
 - name: PriorityClass | Create k8s-cluster-critical
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/oci.yml b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
index 72142eae6..eb074634e 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
@@ -3,6 +3,7 @@
   copy:
     src: "oci-rbac.yml"
     dest: "{{ kube_config_dir }}/oci-rbac.yml"
+    mode: 0640
   when:
   - cloud_provider is defined
   - cloud_provider == 'oci'
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
index 15b2ecf2b..1c1534698 100644
--- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
@@ -12,7 +12,7 @@
 - name: CephFS Provisioner | Remove legacy namespace
   shell: |
     {{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
   tags:
@@ -21,7 +21,7 @@
 - name: CephFS Provisioner | Remove legacy storageclass
   shell: |
     {{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
   tags:
diff --git a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml
index e25e0b143..06bc18849 100644
--- a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml
@@ -12,7 +12,7 @@
 - name: RBD Provisioner | Remove legacy namespace
   shell: |
     {{ bin_dir }}/kubectl delete namespace {{ rbd_provisioner_namespace }}
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
   tags:
@@ -21,7 +21,7 @@
 - name: RBD Provisioner | Remove legacy storageclass
   shell: |
     {{ bin_dir }}/kubectl delete storageclass {{ rbd_provisioner_storage_class }}
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
   tags:
@@ -63,6 +63,7 @@
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.file }}"
+    mode: 0644
   with_items: "{{ rbd_provisioner_templates }}"
   register: rbd_provisioner_manifests
   when: inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
index 42112b0d5..4217c6075 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
@@ -12,7 +12,7 @@
 - name: Cert Manager | Remove legacy namespace
   shell: |
     {{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
   tags:
diff --git a/roles/kubernetes-apps/metallb/tasks/main.yml b/roles/kubernetes-apps/metallb/tasks/main.yml
index 12e704541..551f2f28a 100644
--- a/roles/kubernetes-apps/metallb/tasks/main.yml
+++ b/roles/kubernetes-apps/metallb/tasks/main.yml
@@ -55,7 +55,7 @@
   command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system get secret memberlist"
   register: metallb_secret
   become: true
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
 
diff --git a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
index 3e483bf7f..45a64d2b2 100644
--- a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
@@ -12,12 +12,12 @@
   run_once: true
 
 - name: kube-router | Wait for kube-router pods to be ready
-  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"   # noqa 601
+  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"   # noqa 601 ignore-errors
   register: pods_not_ready
   until: pods_not_ready.stdout.find("kube-router")==-1
   retries: 30
   delay: 10
-  ignore_errors: yes
+  ignore_errors: true
   delegate_to: "{{ groups['kube_control_plane'] | first }}"
   run_once: true
   changed_when: false
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml
index 1e1dda97f..36bb62798 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml
@@ -12,7 +12,7 @@
     - apiserver-kubelet-client.key
     - front-proxy-client.crt
     - front-proxy-client.key
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
 
 - name: Backup old confs
   copy:
@@ -25,4 +25,4 @@
     - controller-manager.conf
     - kubelet.conf
     - scheduler.conf
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
index b362a2a49..6176ba893 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -50,18 +50,21 @@
   file:
     path: "{{ audit_policy_file | dirname }}"
     state: directory
+    mode: 0750
   when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)
 
 - name: Write api audit policy yaml
   template:
     src: apiserver-audit-policy.yaml.j2
     dest: "{{ audit_policy_file }}"
+    mode: 0640
   when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)
 
 - name: Write api audit webhook config yaml
   template:
     src: apiserver-audit-webhook-config.yaml.j2
     dest: "{{ audit_webhook_config_file }}"
+    mode: 0640
   when: kubernetes_audit_webhook|default(false)
 
 # Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint.
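
The added mode lines answer ansible-lint 5's risky-file-permissions rule, which flags file, copy, and template tasks that create content without declaring permissions. Unquoted values such as 0644 are YAML octal literals, so they express the intended mode exactly; the general shape, sketched with illustrative paths:

- name: Example | Render a config with explicit permissions
  template:
    src: example-config.yaml.j2
    dest: "{{ kube_config_dir }}/example-config.yaml"
    mode: 0640
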
diff --git a/roles/kubernetes/control-plane/tasks/main.yml b/roles/kubernetes/control-plane/tasks/main.yml
index a073b5ded..ea2dd2d02 100644
--- a/roles/kubernetes/control-plane/tasks/main.yml
+++ b/roles/kubernetes/control-plane/tasks/main.yml
@@ -7,12 +7,14 @@
   template:
     src: webhook-token-auth-config.yaml.j2
     dest: "{{ kube_config_dir }}/webhook-token-auth-config.yaml"
+    mode: 0640
   when: kube_webhook_token_auth|default(false)
 
 - name: Create webhook authorization config
   template:
     src: webhook-authorization-config.yaml.j2
     dest: "{{ kube_config_dir }}/webhook-authorization-config.yaml"
+    mode: 0640
   when: kube_webhook_authorization|default(false)
 
 - name: Create kube-scheduler config
@@ -40,7 +42,7 @@
   when: ansible_os_family in ["Debian","RedHat"]
   tags:
     - kubectl
-  ignore_errors: True
+  ignore_errors: true  # noqa ignore-errors
 
 - name: Set kubectl bash completion file permissions
   file:
@@ -52,7 +54,7 @@
   tags:
     - kubectl
     - upgrade
-  ignore_errors: True
+  ignore_errors: true  # noqa ignore-errors
 
 - name: Disable SecurityContextDeny admission-controller and enable PodSecurityPolicy
   set_fact:
@@ -77,12 +79,13 @@
   template:
     src: k8s-certs-renew.sh.j2
     dest: "{{ bin_dir }}/k8s-certs-renew.sh"
-    mode: '755'
+    mode: 0755
 
 - name: Renew K8S control plane certificates monthly 1/2
   template:
     src: "{{ item }}.j2"
     dest: "/etc/systemd/system/{{ item }}"
+    mode: 0644
   with_items:
     - k8s-certs-renew.service
     - k8s-certs-renew.timer
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 5cb654320..6a02f0dab 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -61,6 +61,7 @@
     src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2"
     dest: "{{ kube_config_dir }}/kubeadm-client.conf"
     backup: yes
+    mode: 0640
   when: not is_kube_master
 
 - name: Join to cluster if needed
diff --git a/roles/kubernetes/node-label/tasks/main.yml b/roles/kubernetes/node-label/tasks/main.yml
index d01fda835..b7f8138a6 100644
--- a/roles/kubernetes/node-label/tasks/main.yml
+++ b/roles/kubernetes/node-label/tasks/main.yml
@@ -35,8 +35,10 @@
     - node_labels is defined
     - node_labels is mapping
 
-- debug: var=role_node_labels
-- debug: var=inventory_node_labels
+- debug:  # noqa unnamed-task
+    var: role_node_labels
+- debug:  # noqa unnamed-task
+    var: inventory_node_labels
 
 - name: Set label to node
   command: >-
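
The debug rewrites address ansible-lint 5's unnamed-task rule: inline key=value shorthand such as debug: var=role_node_labels is expanded to block form and tagged # noqa unnamed-task. Giving the task a name would satisfy the rule equally well; both variants, sketched with an illustrative variable:

- debug:  # noqa unnamed-task
    var: example_node_labels

- name: Show resolved node labels
  debug:
    var: example_node_labels
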
diff --git a/roles/kubernetes/node/tasks/kubelet.yml b/roles/kubernetes/node/tasks/kubelet.yml
index 8bff4077c..88204e012 100644
--- a/roles/kubernetes/node/tasks/kubelet.yml
+++ b/roles/kubernetes/node/tasks/kubelet.yml
@@ -18,6 +18,7 @@
     src: "kubelet.env.{{ kubeletConfig_api_version }}.j2"
     dest: "{{ kube_config_dir }}/kubelet.env"
     backup: yes
+    mode: 0640
   notify: Node | restart kubelet
   tags:
     - kubelet
@@ -27,6 +28,7 @@
   template:
     src: "kubelet-config.{{ kubeletConfig_api_version }}.yaml.j2"
     dest: "{{ kube_config_dir }}/kubelet-config.yaml"
+    mode: 0640
   notify: Kubelet | restart kubelet
   tags:
     - kubelet
@@ -37,6 +39,7 @@
     src: "kubelet.service.j2"
     dest: "/etc/systemd/system/kubelet.service"
     backup: "yes"
+    mode: 0644
   notify: Node | restart kubelet
   tags:
     - kubelet
diff --git a/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml b/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml
index 972878bf7..67f40f6dd 100644
--- a/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml
+++ b/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml
@@ -31,3 +31,4 @@
   template:
     src: manifests/haproxy.manifest.j2
     dest: "{{ kube_manifest_dir }}/haproxy.yml"
+    mode: 0640
diff --git a/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml b/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml
index f90084cbc..e176cb976 100644
--- a/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml
+++ b/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml
@@ -31,3 +31,4 @@
   template:
     src: manifests/nginx-proxy.manifest.j2
     dest: "{{ kube_manifest_dir }}/nginx-proxy.yml"
+    mode: 0640
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index d4b7f5014..4cb29d65a 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -57,6 +57,7 @@
   file:
     path: /etc/modules-load.d
     state: directory
+    mode: 0755
 
 - name: Enable br_netfilter module
   modprobe:
@@ -68,6 +69,7 @@
   copy:
     dest: /etc/modules-load.d/kubespray-br_netfilter.conf
     content: br_netfilter
+    mode: 0644
   when: modinfo_br_netfilter.rc == 0
 
 # kube-proxy needs net.bridge.bridge-nf-call-iptables enabled when found if br_netfilter is not a module
@@ -108,7 +110,7 @@
     name: nf_conntrack_ipv4
     state: present
   register: modprobe_nf_conntrack_ipv4
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - kube_proxy_mode == 'ipvs'
   tags:
@@ -117,6 +119,7 @@
 - name: Persist ip_vs modules
   copy:
     dest: /etc/modules-load.d/kube_proxy-ipvs.conf
+    mode: 0644
     content: |
       ip_vs
       ip_vs_rr
diff --git a/roles/kubernetes/preinstall/tasks/0010-swapoff.yml b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
index 73028e0f6..74789319e 100644
--- a/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
+++ b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
@@ -16,4 +16,4 @@
 - name: Disable swap
   command: /sbin/swapoff -a
   when: swapon.stdout
-  ignore_errors: "{{ ansible_check_mode }}"
+  ignore_errors: "{{ ansible_check_mode }}"  # noqa ignore-errors
diff --git a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
index f184670ab..312df995a 100644
--- a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
+++ b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
@@ -4,6 +4,7 @@
     path: "{{ item }}"
     state: directory
     owner: kube
+    mode: 0755
   when: inventory_hostname in groups['k8s_cluster']
   become: true
   tags:
@@ -28,6 +29,7 @@
     path: "{{ item }}"
     state: directory
     owner: root
+    mode: 0755
   when: inventory_hostname in groups['k8s_cluster']
   become: true
   tags:
@@ -59,6 +61,7 @@
     src: "{{ kube_cert_dir }}"
     dest: "{{ kube_cert_compat_dir }}"
     state: link
+    mode: 0755
   when:
     - inventory_hostname in groups['k8s_cluster']
     - kube_cert_dir != kube_cert_compat_dir
@@ -69,6 +72,7 @@
     path: "{{ item }}"
     state: directory
     owner: kube
+    mode: 0755
   with_items:
     - "/etc/cni/net.d"
     - "/opt/cni/bin"
diff --git a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
index 39921595e..332f49d86 100644
--- a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
+++ b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
@@ -18,6 +18,7 @@
     create: yes
     backup: yes
     marker: "# Ansible entries {mark}"
+    mode: 0644
   notify: Preinstall | propagate resolvconf to k8s components
 
 - name: Remove search/domain/nameserver options before block
diff --git a/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml b/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml
index b8b673bd2..d24a4fffa 100644
--- a/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml
+++ b/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml
@@ -19,6 +19,7 @@
       [keyfile]
       unmanaged-devices+=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico
     dest: /etc/NetworkManager/conf.d/calico.conf
+    mode: 0644
   when:
     - nm_check.rc == 0
     - kube_network_plugin == "calico"
@@ -32,5 +33,6 @@
       [keyfile]
       unmanaged-devices+=interface-name:kube-ipvs0;interface-name:nodelocaldns
     dest: /etc/NetworkManager/conf.d/k8s.conf
+    mode: 0644
   when: nm_check.rc == 0
   notify: Preinstall | reload NetworkManager
diff --git a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
index 051b9aa35..ddc33fa32 100644
--- a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
+++ b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
@@ -30,6 +30,7 @@
     state: present
     create: yes
     backup: yes
+    mode: 0644
   when:
     - disable_ipv6_dns
     - not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]
@@ -59,6 +60,7 @@
   file:
     name: "{{ sysctl_file_path | dirname }}"
     state: directory
+    mode: 0755
 
 - name: Enable ip forwarding
   sysctl:
diff --git a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
index 95bc711dc..32a2c8e77 100644
--- a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
+++ b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
@@ -22,6 +22,7 @@
     backup: yes
     unsafe_writes: yes
     marker: "# Ansible inventory hosts {mark}"
+    mode: 0644
   when: populate_inventory_to_hosts_file
 
 - name: Hosts | populate kubernetes loadbalancer address into hosts file
diff --git a/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
index 6599a21d4..28aed0740 100644
--- a/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
+++ b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
@@ -11,6 +11,7 @@
     insertbefore: BOF
     backup: yes
     marker: "# Ansible entries {mark}"
+    mode: 0644
   notify: Preinstall | propagate resolvconf to k8s components
   when: dhclientconffile is defined
 
diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml
index 3d3451221..bf15a7055 100644
--- a/roles/kubernetes/preinstall/tasks/main.yml
+++ b/roles/kubernetes/preinstall/tasks/main.yml
@@ -91,7 +91,8 @@
 
 # We need to make sure the network is restarted early enough so that docker can later pick up the correct system
 # nameservers and search domains
-- meta: flush_handlers
+- name: Flush handlers
+  meta: flush_handlers
 
 - name: Check if we are running inside a Azure VM
   stat:
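
Bare meta tasks trip the same unnamed-task rule, hence the named flush_handlers entry above. Flushing runs any handlers notified so far (here, the network restart) at that exact point in the play instead of at the end, so later tasks see the restarted services:

- name: Flush handlers
  meta: flush_handlers
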
diff --git a/roles/network_plugin/cilium/tasks/apply.yml b/roles/network_plugin/cilium/tasks/apply.yml
index 9824337e2..4715603d9 100644
--- a/roles/network_plugin/cilium/tasks/apply.yml
+++ b/roles/network_plugin/cilium/tasks/apply.yml
@@ -16,7 +16,7 @@
   until: pods_not_ready.stdout.find("cilium")==-1
   retries: 30
   delay: 10
-  ignore_errors: yes
+  failed_when: false
   when: inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Cilium | Hubble install
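
The cilium wait task switches from ignore_errors to failed_when: false. The distinction matters for retried probes: ignore_errors still reports the task as failed and merely continues, while failed_when: false records it as OK, keeping transient not-ready states out of the failure summary. A minimal sketch (label and loop condition illustrative):

- name: Example | Wait for pods to report ready
  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=example"
  register: example_pods
  until: example_pods.rc == 0
  retries: 30
  delay: 10
  failed_when: false
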
diff --git a/roles/network_plugin/kube-router/tasks/main.yml b/roles/network_plugin/kube-router/tasks/main.yml
index f107eed64..e331f2b14 100644
--- a/roles/network_plugin/kube-router/tasks/main.yml
+++ b/roles/network_plugin/kube-router/tasks/main.yml
@@ -23,7 +23,7 @@
   slurp:
     src: /etc/cni/net.d/10-kuberouter.conflist
   register: cni_config_slurp
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
 
 - name: kube-router | Set cni_config variable
   set_fact:
diff --git a/roles/recover_control_plane/etcd/tasks/main.yml b/roles/recover_control_plane/etcd/tasks/main.yml
index a0e702575..e3dc33930 100644
--- a/roles/recover_control_plane/etcd/tasks/main.yml
+++ b/roles/recover_control_plane/etcd/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Get etcd endpoint health
   command: "{{ bin_dir }}/etcdctl endpoint health"
   register: etcd_endpoint_health
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   changed_when: false
   check_mode: no
   environment:
@@ -38,13 +38,13 @@
     state: absent
   delegate_to: "{{ item }}"
   with_items: "{{ groups['broken_etcd'] }}"
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   when:
     - groups['broken_etcd']
     - has_quorum
 
 - name: Delete old certificates
-  # noqa 302 - rm is ok here for now
+  # noqa 302 ignore-errors - rm is ok here for now
   shell: "rm {{ etcd_cert_dir }}/*{{ item }}*"
   with_items: "{{ groups['broken_etcd'] }}"
   register: delete_old_cerificates
diff --git a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
index bef89f192..1ecc90fef 100644
--- a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
+++ b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
@@ -13,6 +13,7 @@
   copy:
     src: "{{ etcd_snapshot }}"
     dest: /tmp/snapshot.db
+    mode: 0640
   when: etcd_snapshot is defined
 
 - name: Stop etcd
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index fd4c6fc58..3205c008f 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: Delete node  # noqa 301
+- name: Delete node  # noqa 301 ignore-errors
   command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube_control_plane']|first }}"
-  ignore_errors: yes
\ No newline at end of file
+  ignore_errors: true
diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml b/roles/remove-node/remove-etcd-node/tasks/main.yml
index 5c800044f..c69dd9069 100644
--- a/roles/remove-node/remove-etcd-node/tasks/main.yml
+++ b/roles/remove-node/remove-etcd-node/tasks/main.yml
@@ -27,7 +27,7 @@
 - name: Lookup etcd member id
   shell: "{{ bin_dir }}/etcdctl member list | grep {{ node_ip }} | cut -d, -f1"
   register: etcd_member_id
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
   changed_when: false
   check_mode: no
   tags:
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 0d17d2e88..00029b09b 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -86,7 +86,7 @@
   when:
     - crictl.stat.exists
     - container_manager in ["crio", "containerd"]
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
 
 - name: reset | force remove all cri containers
   command: "{{ bin_dir }}/crictl rm -a -f"
@@ -129,7 +129,7 @@
   when:
     - crictl.stat.exists
     - container_manager == "containerd"
-  ignore_errors: true
+  ignore_errors: true  # noqa ignore-errors
 
 - block:
     - name: reset | force remove all cri pods
@@ -206,7 +206,7 @@
 
 - name: Clear IPVS virtual server table
   command: "ipvsadm -C"
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   when:
     - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s_cluster']
 
@@ -306,7 +306,7 @@
     - /etc/modules-load.d/kube_proxy-ipvs.conf
     - /etc/modules-load.d/kubespray-br_netfilter.conf
     - /usr/libexec/kubernetes
-  ignore_errors: yes
+  ignore_errors: true  # noqa ignore-errors
   tags:
     - files
 
@@ -333,7 +333,7 @@
     - dns
 
 - name: reset | include file with reset tasks specific to the network_plugin if exists
-  include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/reset.yml') | realpath  }}"
+  include_tasks: "{{ (role_path,'../network_plugin',kube_network_plugin,'tasks/reset.yml') | path_join | realpath  }}"
   when:
     - kube_network_plugin in ['flannel', 'cilium', 'kube-router', 'calico']
   tags:
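
The include path above moves from string concatenation to the path_join filter (available since ansible-base 2.10), which joins the segments with the correct separator before realpath resolves the ../ component. Under illustrative values:

- name: Example | Include a per-plugin reset file
  include_tasks: "{{ (role_path, '../network_plugin', example_plugin, 'tasks/reset.yml') | path_join | realpath }}"
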
diff --git a/roles/win_nodes/kubernetes_patch/tasks/main.yml b/roles/win_nodes/kubernetes_patch/tasks/main.yml
index 32f511a4e..77da68352 100644
--- a/roles/win_nodes/kubernetes_patch/tasks/main.yml
+++ b/roles/win_nodes/kubernetes_patch/tasks/main.yml
@@ -29,10 +29,12 @@
       register: patch_kube_proxy_state
       when: current_kube_proxy_state.stdout | trim | lower != "linux"
 
-    - debug: msg={{ patch_kube_proxy_state.stdout_lines }}
+    - debug:  # noqa unnamed-task
+        msg: "{{ patch_kube_proxy_state.stdout_lines }}"
       when: patch_kube_proxy_state is not skipped
 
-    - debug: msg={{ patch_kube_proxy_state.stderr_lines }}
+    - debug:  # noqa unnamed-task
+        msg: "{{ patch_kube_proxy_state.stderr_lines }}"
       when: patch_kube_proxy_state is not skipped
   tags: init
   when:
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index 957c1aed7..3f31217d3 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -135,6 +135,7 @@
         path: "/tmp/{{ archive_dirname }}"
         dest: "{{ dir|default('.') }}/logs.tar.gz"
         remove: true
+        mode: 0640
       delegate_to: localhost
       connection: local
       become: false
diff --git a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
index e2ef4e8fc..a0b36bebb 100644
--- a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
+++ b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
@@ -4,6 +4,7 @@
   file:
     state: directory
     path: "{{ images_dir }}"
+    mode: 0755
 
 - name: Download images files
   get_url:
@@ -39,6 +40,7 @@
   template:
     src: Dockerfile
     dest: "{{ images_dir }}/Dockerfile"
+    mode: 0644
 
 - name: Create docker images for each OS  # noqa 301
   command: docker build -t {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
diff --git a/tests/cloud_playbooks/create-aws.yml b/tests/cloud_playbooks/create-aws.yml
index a1982edfa..8a03c9259 100644
--- a/tests/cloud_playbooks/create-aws.yml
+++ b/tests/cloud_playbooks/create-aws.yml
@@ -22,3 +22,4 @@
     template:
       src: ../templates/inventory-aws.j2  # noqa 404 CI inventory templates are not in role_path
       dest: "{{ inventory_path }}"
+      mode: 0644
diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml
index 3b58aa0d3..3726eb158 100644
--- a/tests/cloud_playbooks/create-do.yml
+++ b/tests/cloud_playbooks/create-do.yml
@@ -79,7 +79,7 @@
       register: droplets
       with_items: "{{ instance_names }}"
 
-    - debug:
+    - debug:  # noqa unnamed-task
         msg: "{{ droplets }}, {{ inventory_path }}"
       when: state == 'present'
 
@@ -87,4 +87,5 @@
       template:
         src: ../templates/inventory-do.j2  # noqa 404 CI templates are not in role_path
         dest: "{{ inventory_path }}"
+        mode: 0644
       when: state == 'present'
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index f9f474f83..f94b05bcb 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -28,7 +28,7 @@
           {%- endif -%}
 
     - name: Create gce instances
-      gce:
+      google.cloud.gcp_compute_instance:
         instance_names: "{{ instance_names }}"
         machine_type: "{{ cloud_machine_type }}"
         image: "{{ cloud_image | default(omit) }}"
@@ -53,17 +53,20 @@
       template:
         src: ../templates/inventory-gce.j2
         dest: "{{ inventory_path }}"
+        mode: 0644
 
     - name: Make group_vars directory
       file:
         path: "{{ inventory_path|dirname }}/group_vars"
         state: directory
+        mode: 0755
       when: mode in ['scale', 'separate-scale', 'ha-scale']
 
     - name: Template fake hosts group vars  # noqa 404 CI templates are not in role_path
       template:
         src: ../templates/fake_hosts.yml.j2
         dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"
+        mode: 0644
       when: mode in ['scale', 'separate-scale', 'ha-scale']
 
     - name: Delete group_vars directory
diff --git a/tests/cloud_playbooks/delete-gce.yml b/tests/cloud_playbooks/delete-gce.yml
index 00e671240..b88abea1c 100644
--- a/tests/cloud_playbooks/delete-gce.yml
+++ b/tests/cloud_playbooks/delete-gce.yml
@@ -20,7 +20,7 @@
           {%- endif -%}
 
     - name: stop gce instances
-      gce:
+      google.cloud.gcp_compute_instance:
         instance_names: "{{ instance_names }}"
         image: "{{ cloud_image | default(omit) }}"
         service_account_email: "{{ gce_service_account_email }}"
@@ -34,7 +34,7 @@
       register: gce
 
     - name: delete gce instances
-      gce:
+      google.cloud.gcp_compute_instance:
         instance_names: "{{ instance_names }}"
         image: "{{ cloud_image | default(omit) }}"
         service_account_email: "{{ gce_service_account_email }}"
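
ansible-base 2.10 moved most cloud modules out of core, and the legacy gce module is replaced by google.cloud.gcp_compute_instance from the google.cloud collection. Note that only the module name changes in these hunks: the surviving arguments (instance_names, service_account_email, and so on) belong to the old gce module, so a fully working call would need reshaping along these lines (a hedged sketch; disk and network definitions omitted):

- name: Example | Create a test instance
  google.cloud.gcp_compute_instance:
    name: "{{ instance_name }}"
    machine_type: "{{ cloud_machine_type }}"
    zone: "{{ cloud_region }}-a"
    project: "{{ gce_project_id }}"
    auth_kind: serviceaccount
    service_account_file: "{{ gce_credential_file }}"
    state: present
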
diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
index bbbce6e01..d939db02c 100644
--- a/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
+++ b/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
@@ -12,11 +12,13 @@
   file:
     path: "/tmp/{{ test_name }}"
     state: directory
+    mode: 0755
 
 - name: Template vm files for CI job
   template:
     src: "vm.yml.j2"
     dest: "/tmp/{{ test_name }}/instance-{{ vm_id }}.yml"
+    mode: 0644
   loop: "{{ range(1, vm_count|int + 1, 1) | list }}"
   loop_control:
     index_var: vm_id
@@ -47,5 +49,6 @@
   template:
     src: "inventory.j2"
     dest: "{{ inventory_path }}"
+    mode: 0644
   vars:
     vms: "{{ vm_ips }}"
diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml
index 679f80558..eeb0edb79 100644
--- a/tests/cloud_playbooks/upload-logs-gcs.yml
+++ b/tests/cloud_playbooks/upload-logs-gcs.yml
@@ -33,11 +33,13 @@
       template:
         src: gcs_life.json.j2
         dest: "{{ dir }}/gcs_life.json"
+        mode: 0644
 
     - name: Create a boto config to access GCS
       template:
         src: boto.j2
         dest: "{{ dir }}/.boto"
+        mode: 0640
       no_log: True
 
     - name: Download gsutil cp installer
@@ -74,5 +76,5 @@
       failed_when: false
       no_log: True
 
-    - debug:
+    - debug:  # noqa unnamed-task
         msg: "A public url https://storage.googleapis.com/{{ test_name }}/{{ file_name }}"
diff --git a/tests/requirements.txt b/tests/requirements.txt
index 651990756..2524ef93c 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -4,7 +4,7 @@ apache-libcloud==2.2.1
 tox==3.11.1
 dopy==0.3.7
 cryptography==2.8
-ansible-lint==4.2.0
+ansible-lint==5.0.11
 openshift==0.8.8
 molecule==3.0.6
 molecule-vagrant==0.3
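
Pinning ansible-lint at 5.0.11 is what introduces the named rules seen throughout this patch (risky-file-permissions, unnamed-task, ignore-errors, var-naming, role-name); the 4.x series only knew numeric IDs. Repo-wide exceptions conventionally live in a .ansible-lint file at the repository root; a plausible minimal shape, purely illustrative and not the project's actual file:

---
parseable: true
skip_list:
  - role-name
  - var-naming
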
diff --git a/tests/scripts/testcases_prepare.sh b/tests/scripts/testcases_prepare.sh
index 454315783..d70086a2b 100755
--- a/tests/scripts/testcases_prepare.sh
+++ b/tests/scripts/testcases_prepare.sh
@@ -1,6 +1,7 @@
 #!/bin/bash
 set -euxo pipefail
 
+/usr/bin/python -m pip uninstall -y ansible
 /usr/bin/python -m pip install -r tests/requirements.txt
 mkdir -p /.ssh
 mkdir -p cluster-dump
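
The uninstall line guarantees a clean slate before tests/requirements.txt pulls in the new toolchain: pip cannot reliably upgrade the monolithic ansible 2.9 package in place to the 3.x meta-package that depends on ansible-base, so removing it first avoids conflicts from leftover files. The same ordering expressed as Ansible tasks, for illustration only:

- name: Remove any pre-3.x ansible package first
  pip:
    name: ansible
    state: absent

- name: Install the pinned test requirements
  pip:
    requirements: tests/requirements.txt
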
diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml
index adf0a35c9..e84bad264 100644
--- a/tests/testcases/010_check-apiserver.yml
+++ b/tests/testcases/010_check-apiserver.yml
@@ -9,7 +9,7 @@
       status_code: 200
     register: apiserver_response
 
-  - debug:
+  - debug:  # noqa unnamed-task
       msg: "{{ apiserver_response.json }}"
 
   - name: Check API servers version
diff --git a/tests/testcases/015_check-nodes-ready.yml b/tests/testcases/015_check-nodes-ready.yml
index 0faa1d46b..34f592394 100644
--- a/tests/testcases/015_check-nodes-ready.yml
+++ b/tests/testcases/015_check-nodes-ready.yml
@@ -12,7 +12,7 @@
       bin_dir: "/usr/local/bin"
     when: not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]
 
-  - import_role:
+  - import_role:  # noqa unnamed-task
       name: cluster-dump
 
   - name: Check kubectl output
@@ -21,7 +21,7 @@
     register: get_nodes
     no_log: true
 
-  - debug:
+  - debug:  # noqa unnamed-task
       msg: "{{ get_nodes.stdout.split('\n') }}"
 
   - name: Check that all nodes are running and ready
diff --git a/tests/testcases/020_check-pods-running.yml b/tests/testcases/020_check-pods-running.yml
index edea22a5c..c83c9be90 100644
--- a/tests/testcases/020_check-pods-running.yml
+++ b/tests/testcases/020_check-pods-running.yml
@@ -12,7 +12,7 @@
       bin_dir: "/usr/local/bin"
     when: not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]
 
-  - import_role:
+  - import_role:  # noqa unnamed-task
       name: cluster-dump
 
   - name: Check kubectl output
@@ -21,7 +21,7 @@
     register: get_pods
     no_log: true
 
-  - debug:
+  - debug:  # noqa unnamed-task
       msg: "{{ get_pods.stdout.split('\n') }}"
 
   - name: Check that all pods are running and ready
@@ -44,6 +44,6 @@
     register: get_pods
     no_log: true
 
-  - debug:
+  - debug:  # noqa unnamed-task
       msg: "{{ get_pods.stdout.split('\n') }}"
     failed_when: not run_pods_log is success
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index a9386db88..13f353b79 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -34,7 +34,7 @@
       when: get_csr.stdout_lines | length > 0
       changed_when: certificate_approve.stdout
 
-    - debug:
+    - debug:  # noqa unnamed-task
         msg: "{{ certificate_approve.stdout.split('\n') }}"
 
     when: kubelet_rotate_server_certificates | default(false)
@@ -60,7 +60,7 @@
     - busybox1
     - busybox2
 
-  - import_role:
+  - import_role:  # noqa unnamed-task
       name: cluster-dump
 
   - name: Check that all pods are running and ready
@@ -83,7 +83,7 @@
     register: pods
     no_log: true
 
-  - debug:
+  - debug:  # noqa unnamed-task
       msg: "{{ pods.stdout.split('\n') }}"
     failed_when: not run_pods_log is success
 
@@ -92,7 +92,7 @@
             jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
     changed_when: false
     register: hostnet_pods
-    ignore_errors: true
+    ignore_errors: true  # noqa ignore-errors
     no_log: true
 
   - name: Get running pods
@@ -108,7 +108,7 @@
     register: get_pods
     no_log: true
 
-  - debug:
+  - debug:  # noqa unnamed-task
       msg: "{{ get_pods.stdout.split('\n') }}"
 
   - name: Set networking facts
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index 18cf6daf1..358a19983 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -26,7 +26,7 @@
         bin_dir: "/usr/local/bin"
       when: not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]
 
-    - import_role:
+    - import_role:  # noqa unnamed-task
         name: cluster-dump
 
     - name: Wait for netchecker server
@@ -60,7 +60,7 @@
         - netchecker-agent-hostnet
       when: not nca_pod is success
 
-    - debug:
+    - debug:  # noqa unnamed-task
         var: nca_pod.stdout_lines
       failed_when: not nca_pod is success
       when: inventory_hostname == groups['kube_control_plane'][0]
@@ -80,7 +80,7 @@
       failed_when: false
       no_log: true
 
-    - debug:
+    - debug:  # noqa unnamed-task
         var: agents.content | from_json
       failed_when: not agents is success and not agents.content=='{}'
       run_once: true
@@ -106,7 +106,7 @@
       when:
         - agents.content != '{}'
 
-    - debug:
+    - debug:  # noqa unnamed-task
         var: ncs_pod
       run_once: true
       when: not result is success
@@ -131,7 +131,7 @@
         - calico-node
         - cilium
 
-    - debug:
+    - debug:  # noqa unnamed-task
         var: result.content | from_json
       failed_when: not result is success
       run_once: true
@@ -140,14 +140,14 @@
         - result.content
         - result.content[0] == '{'
 
-    - debug:
+    - debug:  # noqa unnamed-task
         var: result
       failed_when: not result is success
       run_once: true
       when:
         - not agents.content == '{}'
 
-    - debug:
+    - debug:  # noqa unnamed-task
         msg: "Cannot get reports from agents, consider as PASSING"
       run_once: true
       when:
diff --git a/tests/testcases/roles/cluster-dump/tasks/main.yml b/tests/testcases/roles/cluster-dump/tasks/main.yml
index 966a13c3d..96419e8a2 100644
--- a/tests/testcases/roles/cluster-dump/tasks/main.yml
+++ b/tests/testcases/roles/cluster-dump/tasks/main.yml
@@ -8,6 +8,7 @@
   archive:
     path: /tmp/cluster-dump
     dest: /tmp/cluster-dump.tgz
+    mode: 0644
   when: inventory_hostname in groups['kube_control_plane']
 
 - name: Fetch dump file
-- 
GitLab