diff --git a/.ansible-lint b/.ansible-lint
index d84419e6a41ded358243c164fa4e3b8c51554797..9ea65c48b59342fa074dba750fb3a19b759a0fcb 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -35,6 +35,41 @@ skip_list:
   # Roles in kubespray don't need fully qualified collection names
   # (Disabled in Feb 2023)
   - 'fqcn-builtins'
+
+  # names should start with an uppercase letter
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'name[casing]'
+
+  # Everything should be named
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'name[play]'
+  - 'name[missing]'
+
+  # templates should only be at the end of 'name'
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'name[jinja]'
+  - 'name[template]'
+
+  # order of keys errors
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'key-order'
+
+  # No changed-when on commands
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'no-changed-when'
+
+  # Disable galaxy rules
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'galaxy'
+
+  # Disable run-once check with free strategy
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'run-once[task]'
+
+  # Disable outdated-tag check
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'warning[outdated-tag]'
 exclude_paths:
   # Generated files
   - tests/files/custom_cni/cilium.yaml
+  - venv
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 9b805e2961e909a72282953f3be306257c3e5006..5b6a9e41fb37dc113cefc3b3aaa44eebab8625dd 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -42,6 +42,7 @@ before_script:
   - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
   - python -m pip uninstall -y ansible ansible-base ansible-core
   - python -m pip install -r tests/requirements.txt
+  - ansible-galaxy install -r tests/requirements.yml
   - mkdir -p /.ssh
 
 .job: &job
diff --git a/.gitlab-ci/lint.yml b/.gitlab-ci/lint.yml
index f063ea08553f35da304caedd3f2c6b626a821b4a..51560118dabb5da4b387ab43ffa93f4ddf6112b1 100644
--- a/.gitlab-ci/lint.yml
+++ b/.gitlab-ci/lint.yml
@@ -71,6 +71,7 @@ tox-inventory-builder:
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
     - python -m pip uninstall -y ansible ansible-base ansible-core
     - python -m pip install -r tests/requirements.txt
+    - ansible-galaxy install -r tests/requirements.yml
   script:
     - pip3 install tox
     - cd contrib/inventory_builder && tox
diff --git a/.gitlab-ci/molecule.yml b/.gitlab-ci/molecule.yml
index 736c0ffd755164ac91ff6164d311d5fc6658f6b7..901f5fc355869ca5beaa69971f107e08ee5ac49f 100644
--- a/.gitlab-ci/molecule.yml
+++ b/.gitlab-ci/molecule.yml
@@ -13,6 +13,7 @@
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
     - python -m pip uninstall -y ansible ansible-base ansible-core
     - python -m pip install -r tests/requirements.txt
+    - ansible-galaxy install -r tests/requirements.yml
     - ./tests/scripts/vagrant_clean.sh
   script:
     - ./tests/scripts/molecule_run.sh
diff --git a/.gitlab-ci/vagrant.yml b/.gitlab-ci/vagrant.yml
index e7dbf73ff9febd677fb6698cd63e02d97a3849b0..dba24bbb2eb5a451f66a1cf6c808f7147a1b69a7 100644
--- a/.gitlab-ci/vagrant.yml
+++ b/.gitlab-ci/vagrant.yml
@@ -17,6 +17,7 @@
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
     - python -m pip uninstall -y ansible ansible-base ansible-core
     - python -m pip install -r tests/requirements.txt
+    - ansible-galaxy install -r tests/requirements.yml
     - ./tests/scripts/vagrant_clean.sh
   script:
     - ./tests/scripts/testcases_run.sh
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 6986c0f14aea4dad0583499688817a5c7bf00a23..08f2f94758999d75b2ce6fe710699dbe8821b0e5 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -12,6 +12,7 @@ To install development dependencies you can set up a python virtual env with the
 virtualenv venv
 source venv/bin/activate
 pip install -r tests/requirements.txt
+ansible-galaxy install -r tests/requirements.yml
 ```
 
 #### Linting
diff --git a/contrib/dind/roles/dind-cluster/tasks/main.yaml b/contrib/dind/roles/dind-cluster/tasks/main.yaml
index 04609bb01735b0998df4cbf2df0f82f30bddb8da..59023df3c2ce03200adaaf37e6625a50433d2c12 100644
--- a/contrib/dind/roles/dind-cluster/tasks/main.yaml
+++ b/contrib/dind/roles/dind-cluster/tasks/main.yaml
@@ -67,7 +67,7 @@
     mode: 0640
 
 - name: Add my pubkey to "{{ distro_user }}" user authorized keys
-  authorized_key:
+  ansible.posix.authorized_key:
     user: "{{ distro_user }}"
     state: present
     key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml
index 5b63a6b37d071dc63ac4235e3aafb269571fe8d8..2541a9319d1f33fbb584fbc8195de8b9fbaa52bc 100644
--- a/contrib/dind/roles/dind-host/tasks/main.yaml
+++ b/contrib/dind/roles/dind-host/tasks/main.yaml
@@ -13,7 +13,7 @@
     distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"
 
 - name: Create dind node containers from "containers" inventory section
-  docker_container:
+  community.docker.docker_container:
     image: "{{ distro_image }}"
     name: "{{ item }}"
     state: started
diff --git a/contrib/kvm-setup/kvm-setup.yml b/contrib/kvm-setup/kvm-setup.yml
index 18b7206684659b93aeea71b432f24cf8a1ab1b81..0496d78b783492aebfb66f837591b9587ffa5424 100644
--- a/contrib/kvm-setup/kvm-setup.yml
+++ b/contrib/kvm-setup/kvm-setup.yml
@@ -3,6 +3,6 @@
   gather_facts: False
   become: yes
   vars:
-    - bootstrap_os: none
+    bootstrap_os: none
   roles:
-    - kvm-setup
+    - { role: kvm-setup }
diff --git a/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml b/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml
index a033c4ee94f85518a4bd1d0eb351d8b87696db6e..fa89836d4da8cda9ce94fb9d255ba929841b4294 100644
--- a/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml
+++ b/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml
@@ -23,8 +23,8 @@
   when: ansible_os_family == "Debian"
 
 # Create deployment user if required
-- include: user.yml
+- include_tasks: user.yml
   when: k8s_deployment_user is defined
 
 # Set proper sysctl values
-- include: sysctl.yml
+- import_tasks: sysctl.yml
diff --git a/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml b/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml
index d991b10ac7adb418570fe7384c690cef4b943e5c..52bc83f09980ced4fa6c3d25077fd01ad92264a2 100644
--- a/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml
+++ b/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml
@@ -1,6 +1,6 @@
 ---
 - name: Load br_netfilter module
-  modprobe:
+  community.general.modprobe:
     name: br_netfilter
     state: present
   register: br_netfilter
@@ -25,7 +25,7 @@
 
 
 - name: Enable net.ipv4.ip_forward in sysctl
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv4.ip_forward
     value: 1
     sysctl_file: "{{ sysctl_file_path }}"
@@ -33,7 +33,7 @@
     reload: yes
 
 - name: Set bridge-nf-call-{arptables,iptables} to 0
-  sysctl:
+  ansible.posix.sysctl:
     name: "{{ item }}"
     state: present
     value: 0
diff --git a/contrib/mitogen/mitogen.yml b/contrib/mitogen/mitogen.yml
index 4dbd0fb76ecc011d68378273e7c6cc41218a37c7..7b93faf2f2f2bd4a545459dbef4488b8d94cf4c5 100644
--- a/contrib/mitogen/mitogen.yml
+++ b/contrib/mitogen/mitogen.yml
@@ -1,6 +1,6 @@
 ---
 - name: Check ansible version
-  import_playbook: ansible_version.yml
+  import_playbook: kubernetes_sigs.kubespray.ansible_version
 
 - hosts: localhost
   strategy: linear
@@ -24,6 +24,7 @@
         url: "{{ mitogen_url }}"
         dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
         validate_certs: true
+        mode: 0644
 
     - name: extract archive
       unarchive:
@@ -31,12 +32,12 @@
         dest: "{{ playbook_dir }}/dist/"
 
     - name: copy plugin
-      synchronize:
+      ansible.posix.synchronize:
         src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
         dest: "{{ playbook_dir }}/plugins/mitogen"
 
     - name: add strategy to ansible.cfg
-      ini_file:
+      community.general.ini_file:
         path: ansible.cfg
         mode: 0644
         section: "{{ item.section | d('defaults') }}"
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/meta/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/meta/main.yml
index 8d3513f029805ea26029534adec202114a1426b2..b7fe4962e0432fa2368cd1cebad117f8c75e869d 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/client/meta/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/meta/main.yml
@@ -6,12 +6,12 @@ galaxy_info:
   description: GlusterFS installation for Linux.
   company: "Midwestern Mac, LLC"
   license: "license (BSD, MIT)"
-  min_ansible_version: 2.0
+  min_ansible_version: "2.0"
   platforms:
   - name: EL
     versions:
-    - 6
-    - 7
+    - "6"
+    - "7"
   - name: Ubuntu
     versions:
     - precise
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml
index e6c3dacb077b998df281885ff2703061412544a2..151ea57512de8958edb8b370ad1dc3d2c7ac6466 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml
@@ -3,14 +3,17 @@
 # hyperkube and needs to be installed as part of the system.
 
 # Setup/install tasks.
-- include: setup-RedHat.yml
+- include_tasks: setup-RedHat.yml
   when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined
 
-- include: setup-Debian.yml
+- include_tasks: setup-Debian.yml
   when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined
 
 - name: Ensure Gluster mount directories exist.
-  file: "path={{ item }} state=directory mode=0775"
+  file:
+    path: "{{ item }}"
+    state: directory
+    mode: 0775
   with_items:
     - "{{ gluster_mount_dir }}"
   when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-RedHat.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-RedHat.yml
index 86827efcd40f29ed1aa3e1e588a18a05190b746f..d2ee36aa7cc9e29100545b70c04f556d5e861de8 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-RedHat.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-RedHat.yml
@@ -1,10 +1,14 @@
 ---
 - name: Install Prerequisites
-  package: name={{ item }}  state=present
+  package:
+    name: "{{ item }}"
+    state: present
   with_items:
     - "centos-release-gluster{{ glusterfs_default_release }}"
 
 - name: Install Packages
-  package: name={{ item }}  state=present
+  package:
+    name: "{{ item }}"
+    state: present
   with_items:
     - glusterfs-client
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/meta/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/meta/main.yml
index 8d3513f029805ea26029534adec202114a1426b2..b7fe4962e0432fa2368cd1cebad117f8c75e869d 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/meta/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/meta/main.yml
@@ -6,12 +6,12 @@ galaxy_info:
   description: GlusterFS installation for Linux.
   company: "Midwestern Mac, LLC"
   license: "license (BSD, MIT)"
-  min_ansible_version: 2.0
+  min_ansible_version: "2.0"
   platforms:
   - name: EL
     versions:
-    - 6
-    - 7
+    - "6"
+    - "7"
   - name: Ubuntu
     versions:
     - precise
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
index 0a58598505d8e511fa544ce62782f45b12fc7d8c..db82d5f11c06bbf9430da8bec0400aa86c99c666 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
@@ -5,39 +5,55 @@
 
 # Install xfs package
 - name: install xfs Debian
-  apt: name=xfsprogs state=present
+  apt:
+    name: xfsprogs
+    state: present
   when: ansible_os_family == "Debian"
 
 - name: install xfs RedHat
-  package: name=xfsprogs state=present
+  package:
+    name: xfsprogs
+    state: present
   when: ansible_os_family == "RedHat"
 
 # Format external volumes in xfs
 - name: Format volumes in xfs
-  filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}"
+  community.general.filesystem:
+    fstype: xfs
+    dev: "{{ disk_volume_device_1 }}"
 
 # Mount external volumes
 - name: mounting new xfs filesystem
-  mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted"
+  ansible.posix.mount:
+    name: "{{ gluster_volume_node_mount_dir }}"
+    src: "{{ disk_volume_device_1 }}"
+    fstype: xfs
+    state: mounted
 
 # Setup/install tasks.
-- include: setup-RedHat.yml
+- include_tasks: setup-RedHat.yml
   when: ansible_os_family == 'RedHat'
 
-- include: setup-Debian.yml
+- include_tasks: setup-Debian.yml
   when: ansible_os_family == 'Debian'
 
 - name: Ensure GlusterFS is started and enabled at boot.
-  service: "name={{ glusterfs_daemon }} state=started enabled=yes"
+  service:
+    name: "{{ glusterfs_daemon }}"
+    state: started
+    enabled: yes
 
 - name: Ensure Gluster brick and mount directories exist.
-  file: "path={{ item }} state=directory mode=0775"
+  file:
+    path: "{{ item }}"
+    state: directory
+    mode: 0775
   with_items:
     - "{{ gluster_brick_dir }}"
     - "{{ gluster_mount_dir }}"
 
 - name: Configure Gluster volume with replicas
-  gluster_volume:
+  gluster.gluster.gluster_volume:
     state: present
     name: "{{ gluster_brick_name }}"
     brick: "{{ gluster_brick_dir }}"
@@ -49,7 +65,7 @@
   when: groups['gfs-cluster']|length > 1
 
 - name: Configure Gluster volume without replicas
-  gluster_volume:
+  gluster.gluster.gluster_volume:
     state: present
     name: "{{ gluster_brick_name }}"
     brick: "{{ gluster_brick_dir }}"
@@ -60,7 +76,7 @@
   when: groups['gfs-cluster']|length <= 1
 
 - name: Mount glusterfs to retrieve disk size
-  mount:
+  ansible.posix.mount:
     name: "{{ gluster_mount_dir }}"
     src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
     fstype: glusterfs
@@ -69,7 +85,8 @@
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
 
 - name: Get Gluster disk size
-  setup: filter=ansible_mounts
+  setup:
+    filter: ansible_mounts
   register: mounts_data
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
 
@@ -86,7 +103,7 @@
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
 
 - name: Unmount glusterfs
-  mount:
+  ansible.posix.mount:
     name: "{{ gluster_mount_dir }}"
     fstype: glusterfs
     src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-RedHat.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-RedHat.yml
index 9dc8f0bb4ba82c01cf2776ad1f8ba923ebf39405..5a4e09ef36dfe75e659ce73bf6464e3619bed3dc 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-RedHat.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-RedHat.yml
@@ -1,11 +1,15 @@
 ---
 - name: Install Prerequisites
-  package: name={{ item }}  state=present
+  package:
+    name: "{{ item }}"
+    state: present
   with_items:
     - "centos-release-gluster{{ glusterfs_default_release }}"
 
 - name: Install Packages
-  package: name={{ item }}  state=present
+  package:
+    name: "{{ item }}"
+    state: present
   with_items:
     - glusterfs-server
     - glusterfs-client
diff --git a/contrib/network-storage/heketi/roles/prepare/tasks/main.yml b/contrib/network-storage/heketi/roles/prepare/tasks/main.yml
index dad3bae9700fb5a337ee98fe25a2ae3573653d12..20012b120dafedfe2af9b83e5bdaa505f0fbdadd 100644
--- a/contrib/network-storage/heketi/roles/prepare/tasks/main.yml
+++ b/contrib/network-storage/heketi/roles/prepare/tasks/main.yml
@@ -5,7 +5,7 @@
     - "dm_snapshot"
     - "dm_mirror"
     - "dm_thin_pool"
-  modprobe:
+  community.general.modprobe:
     name: "{{ item }}"
     state: "present"
 
diff --git a/contrib/offline/generate_list.yml b/contrib/offline/generate_list.yml
index c3458e6756cbc980c7af431109c05298df897ac5..5442425bc9aa1ca2e969e3e41e6e15f29c8d227f 100644
--- a/contrib/offline/generate_list.yml
+++ b/contrib/offline/generate_list.yml
@@ -14,6 +14,7 @@
     - template:
         src: ./contrib/offline/temp/{{ item }}.list.template
         dest: ./contrib/offline/temp/{{ item }}.list
+        mode: 0644
       with_items:
         - files
         - images
diff --git a/playbooks/ansible_version.yml b/playbooks/ansible_version.yml
index 84aad69c8809be6fc24ef280bf265e5b2b5a9984..7e8a0df4c51a4ec8001c0b821df015e6d071da78 100644
--- a/playbooks/ansible_version.yml
+++ b/playbooks/ansible_version.yml
@@ -3,8 +3,8 @@
   gather_facts: false
   become: no
   vars:
-    minimal_ansible_version: 2.12.0
-    maximal_ansible_version: 2.13.0
+    minimal_ansible_version: 2.14.0
+    maximal_ansible_version: 2.16.0
     ansible_connection: local
   tags: always
   tasks:
diff --git a/playbooks/reset.yml b/playbooks/reset.yml
index 6fa9fa3accb1883816c512488a151e917ce8cc44..654f0a1b7e1f2ab8d899b2d14d78317787492dc5 100644
--- a/playbooks/reset.yml
+++ b/playbooks/reset.yml
@@ -17,14 +17,15 @@
 
 - hosts: etcd:k8s_cluster:calico_rr
   gather_facts: False
-  vars_prompt:
-    name: "reset_confirmation"
-    prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
-    default: "no"
-    private: no
-
   pre_tasks:
-    - name: check confirmation
+    - name: Reset Confirmation
+      pause:
+        prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
+      register: reset_confirmation_prompt
+      run_once: true
+      when:
+        - not (skip_confirmation | default(false) | bool)
+    - name: Check confirmation
       fail:
         msg: "Reset confirmation failed"
-      when: reset_confirmation != "yes"
+      when: not (skip_confirmation | default(false) | bool) and (reset_confirmation | default(reset_confirmation_prompt.user_input | default('')) != "yes")
diff --git a/requirements.txt b/requirements.txt
index 3b97a13a5736e7b57e58de270dbbd2f8fa5d1adc..d2724e99e5b49f53453b5ff62f8c8a208299e91e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,10 +1,10 @@
-ansible==5.7.1
-ansible-core==2.12.10
-cryptography==3.4.8
+ansible==7.6.0
+ansible-core==2.14.6
+cryptography==41.0.1
 jinja2==3.1.2
 jmespath==1.0.1
-MarkupSafe==2.1.2
+MarkupSafe==2.1.3
 netaddr==0.8.0
 pbr==5.11.1
-ruamel.yaml==0.17.21
+ruamel.yaml==0.17.31
 ruamel.yaml.clib==0.2.7
diff --git a/roles/bootstrap-os/tasks/bootstrap-centos.yml b/roles/bootstrap-os/tasks/bootstrap-centos.yml
index 19555005aeb119a6d3aad072b6033cb156423e26..aaab37202459e0a30fd2646c6d6aa2b556582f6f 100644
--- a/roles/bootstrap-os/tasks/bootstrap-centos.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-centos.yml
@@ -5,7 +5,7 @@
     filter: ansible_distribution_*version
 
 - name: Add proxy to yum.conf or dnf.conf if http_proxy is defined
-  ini_file:
+  community.general.ini_file:
     path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}"
     section: main
     option: proxy
@@ -21,6 +21,7 @@
   get_url:
     url: https://yum.oracle.com/public-yum-ol7.repo
     dest: /etc/yum.repos.d/public-yum-ol7.repo
+    mode: 0644
   when:
     - use_oracle_public_repo|default(true)
     - '''ID="ol"'' in os_release.stdout_lines'
@@ -28,7 +29,7 @@
   environment: "{{ proxy_env }}"
 
 - name: Enable Oracle Linux repo
-  ini_file:
+  community.general.ini_file:
     dest: /etc/yum.repos.d/public-yum-ol7.repo
     section: "{{ item }}"
     option: enabled
@@ -53,7 +54,7 @@
     - (ansible_distribution_version | float) >= 7.6
 
 - name: Enable Oracle Linux repo
-  ini_file:
+  community.general.ini_file:
     dest: "/etc/yum.repos.d/oracle-linux-ol{{ ansible_distribution_major_version }}.repo"
     section: "ol{{ ansible_distribution_major_version }}_addons"
     option: "{{ item.option }}"
@@ -69,7 +70,7 @@
     - (ansible_distribution_version | float) >= 7.6
 
 - name: Enable Centos extra repo for Oracle Linux
-  ini_file:
+  community.general.ini_file:
     dest: "/etc/yum.repos.d/centos-extras.repo"
     section: "extras"
     option: "{{ item.option }}"
diff --git a/roles/bootstrap-os/tasks/bootstrap-fedora.yml b/roles/bootstrap-os/tasks/bootstrap-fedora.yml
index 1613173156e1784cfed18e813bb60cc855d9ccc9..4ce77b44a9113d059993cc5b1b2a7d6624720949 100644
--- a/roles/bootstrap-os/tasks/bootstrap-fedora.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-fedora.yml
@@ -10,7 +10,7 @@
     - facts
 
 - name: Add proxy to dnf.conf if http_proxy is defined
-  ini_file:
+  community.general.ini_file:
     path: "/etc/dnf/dnf.conf"
     section: main
     option: proxy
diff --git a/roles/bootstrap-os/tasks/bootstrap-opensuse.yml b/roles/bootstrap-os/tasks/bootstrap-opensuse.yml
index c833bfd0d5f1a50f2af2d7026ba502bee1688a54..9b69dcd8916289b2379c5631cd2465f72c67d812 100644
--- a/roles/bootstrap-os/tasks/bootstrap-opensuse.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-opensuse.yml
@@ -58,7 +58,7 @@
 
 # Without this package, the get_url module fails when trying to handle https
 - name: Install python-cryptography
-  zypper:
+  community.general.zypper:
     name: python-cryptography
     state: present
     update_cache: true
@@ -67,7 +67,7 @@
     - ansible_distribution_version is version('15.4', '<')
 
 - name: Install python3-cryptography
-  zypper:
+  community.general.zypper:
     name: python3-cryptography
     state: present
     update_cache: true
@@ -77,7 +77,7 @@
 
 # Nerdctl needs some basic packages to get an environment up
 - name: Install basic dependencies
-  zypper:
+  community.general.zypper:
     name:
       - iptables
       - apparmor-parser
diff --git a/roles/bootstrap-os/tasks/bootstrap-redhat.yml b/roles/bootstrap-os/tasks/bootstrap-redhat.yml
index def816465f17adc737b3487ce50ad4e61ae3de01..c9e53525b398485a54573c655b95b7c936bdf9ff 100644
--- a/roles/bootstrap-os/tasks/bootstrap-redhat.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-redhat.yml
@@ -5,7 +5,7 @@
     filter: ansible_distribution_*version
 
 - name: Add proxy to yum.conf or dnf.conf if http_proxy is defined
-  ini_file:
+  community.general.ini_file:
     path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}"
     section: main
     option: proxy
@@ -31,7 +31,7 @@
   become: true
 
 - name: RHEL subscription Organization ID/Activation Key registration
-  redhat_subscription:
+  community.general.redhat_subscription:
     state: present
     org_id: "{{ rh_subscription_org_id }}"
     activationkey: "{{ rh_subscription_activation_key }}"
@@ -50,7 +50,7 @@
 
 # this task has no_log set to prevent logging security sensitive information such as subscription passwords
 - name: RHEL subscription Username/Password registration
-  redhat_subscription:
+  community.general.redhat_subscription:
     state: present
     username: "{{ rh_subscription_username }}"
     password: "{{ rh_subscription_password }}"
@@ -70,7 +70,7 @@
 
 # container-selinux is in extras repo
 - name: Enable RHEL 7 repos
-  rhsm_repository:
+  community.general.rhsm_repository:
     name:
       - "rhel-7-server-rpms"
       - "rhel-7-server-extras-rpms"
@@ -81,7 +81,7 @@
 
 # container-selinux is in appstream repo
 - name: Enable RHEL 8 repos
-  rhsm_repository:
+  community.general.rhsm_repository:
     name:
       - "rhel-8-for-*-baseos-rpms"
       - "rhel-8-for-*-appstream-rpms"
diff --git a/roles/container-engine/cri-o/tasks/cleanup.yaml b/roles/container-engine/cri-o/tasks/cleanup.yaml
index ab06ca01a342a50fe10db742dc59a70f8133c6f3..fd2f119afed66fe73d32e75507c53c1a29eb4546 100644
--- a/roles/container-engine/cri-o/tasks/cleanup.yaml
+++ b/roles/container-engine/cri-o/tasks/cleanup.yaml
@@ -83,7 +83,7 @@
   when: ansible_distribution in ["Amazon"]
 
 - name: Disable modular repos for CRI-O
-  ini_file:
+  community.general.ini_file:
     path: "/etc/yum.repos.d/{{ item.repo }}.repo"
     section: "{{ item.section }}"
     option: enabled
diff --git a/roles/container-engine/cri-o/tasks/main.yaml b/roles/container-engine/cri-o/tasks/main.yaml
index d6c7eb0991dc1351dec531185149502e85f1d402..bdd300b2ad4f5daf77afa8b80273282ac4b0f51b 100644
--- a/roles/container-engine/cri-o/tasks/main.yaml
+++ b/roles/container-engine/cri-o/tasks/main.yaml
@@ -122,7 +122,7 @@
     mode: 0755
 
 - name: cri-o | set overlay driver
-  ini_file:
+  community.general.ini_file:
     dest: /etc/containers/storage.conf
     section: storage
     option: "{{ item.option }}"
@@ -136,7 +136,7 @@
 
 # metacopy=on is available since 4.19 and was backported to RHEL 4.18 kernel
 - name: cri-o | set metacopy mount options correctly
-  ini_file:
+  community.general.ini_file:
     dest: /etc/containers/storage.conf
     section: storage.options.overlay
     option: mountopt
diff --git a/roles/container-engine/cri-o/tasks/reset.yml b/roles/container-engine/cri-o/tasks/reset.yml
index 9c8c0aac1422f9b1c5481421b3982d2d71c116c5..460382766f5c45a54937011edd70de1ef730398e 100644
--- a/roles/container-engine/cri-o/tasks/reset.yml
+++ b/roles/container-engine/cri-o/tasks/reset.yml
@@ -43,8 +43,6 @@
 
 - name: CRI-O | Run yum-clean-metadata
   command: yum clean metadata
-  args:
-    warn: no
   when:
     - ansible_os_family == "RedHat"
   tags:
diff --git a/roles/container-engine/cri-o/tasks/setup-amazon.yaml b/roles/container-engine/cri-o/tasks/setup-amazon.yaml
index 369036725219890b0eedc781a037e562886be3b7..843bc2029643b92cf7c4cfcdbf5c2437062c622b 100644
--- a/roles/container-engine/cri-o/tasks/setup-amazon.yaml
+++ b/roles/container-engine/cri-o/tasks/setup-amazon.yaml
@@ -14,7 +14,7 @@
     - amzn2_extras_file_stat.stat.exists
 
 - name: Remove docker repository
-  ini_file:
+  community.general.ini_file:
     dest: /etc/yum.repos.d/amzn2-extras.repo
     section: amzn2extra-docker
     option: enabled
diff --git a/roles/container-engine/kata-containers/tasks/main.yml b/roles/container-engine/kata-containers/tasks/main.yml
index e5b7cd8db2d332c5073af00ae6656b6ee541c8bd..9d1bf91268ac77c5d512b9fd1620419eba8af85f 100644
--- a/roles/container-engine/kata-containers/tasks/main.yml
+++ b/roles/container-engine/kata-containers/tasks/main.yml
@@ -36,7 +36,7 @@
     - qemu
 
 - name: kata-containers | Load vhost kernel modules
-  modprobe:
+  community.general.modprobe:
     state: present
     name: "{{ item }}"
   with_items:
diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml
index 39e0e34c281b20b6eca9b2dca02d11198e7da144..e956b6ff2722fbafa436d3e522adf4fdfd078259 100644
--- a/roles/download/tasks/download_container.yml
+++ b/roles/download/tasks/download_container.yml
@@ -80,7 +80,7 @@
         - download_run_once
 
     - name: download_container | Copy image to ansible host cache
-      synchronize:
+      ansible.posix.synchronize:
         src: "{{ image_path_final }}"
         dest: "{{ image_path_cached }}"
         use_ssh_args: true
@@ -92,7 +92,7 @@
         - download_delegate == inventory_hostname
 
     - name: download_container | Upload image to node if it is cached
-      synchronize:
+      ansible.posix.synchronize:
         src: "{{ image_path_cached }}"
         dest: "{{ image_path_final }}"
         use_ssh_args: true
diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml
index e6576f29ea70daff983673b3ecc5ef5eb0855f99..fba76405e8cabf7c1d5bdee431f8b5602ae0698b 100644
--- a/roles/download/tasks/download_file.yml
+++ b/roles/download/tasks/download_file.yml
@@ -105,7 +105,7 @@
     no_log: "{{ not (unsafe_show_logs|bool) }}"
 
   - name: download_file | Copy file back to ansible host file cache
-    synchronize:
+    ansible.posix.synchronize:
       src: "{{ file_path_cached }}"
       dest: "{{ file_path_cached }}"
       use_ssh_args: true
@@ -116,7 +116,7 @@
     - download_delegate == inventory_hostname
 
   - name: download_file | Copy file from cache to nodes, if it is available
-    synchronize:
+    ansible.posix.synchronize:
       src: "{{ file_path_cached }}"
       dest: "{{ download.dest }}"
       use_ssh_args: true
diff --git a/roles/etcd/handlers/backup.yml b/roles/etcd/handlers/backup.yml
index d848cdbcb76ce7fd8d645ae2ebbd6c0145348f6b..2c5577862b735d95ebbddd7b18bc7cffa1e2e656 100644
--- a/roles/etcd/handlers/backup.yml
+++ b/roles/etcd/handlers/backup.yml
@@ -11,7 +11,8 @@
   when: etcd_cluster_is_healthy.rc == 0
 
 - name: Refresh Time Fact
-  setup: filter=ansible_date_time
+  setup:
+    filter: ansible_date_time
 
 - name: Set Backup Directory
   set_fact:
@@ -40,7 +41,7 @@
       --data-dir {{ etcd_data_dir }}
       --backup-dir {{ etcd_backup_directory }}
   environment:
-    ETCDCTL_API: 2
+    ETCDCTL_API: "2"
   retries: 3
   register: backup_v2_command
   until: backup_v2_command.rc == 0
@@ -51,7 +52,7 @@
     {{ bin_dir }}/etcdctl
       snapshot save {{ etcd_backup_directory }}/snapshot.db
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses.split(',') | first }}"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/handlers/backup_cleanup.yml b/roles/etcd/handlers/backup_cleanup.yml
index e670f46f830a4fb7e5c96a90016de3379c543ada..3cebfd0469b6c63ff87ece8179cfc71dee2b5027 100644
--- a/roles/etcd/handlers/backup_cleanup.yml
+++ b/roles/etcd/handlers/backup_cleanup.yml
@@ -7,5 +7,6 @@
 - name: Remove old etcd backups
   shell:
     chdir: "{{ etcd_backup_prefix }}"
-    cmd: "find . -name 'etcd-*' -type d | sort -n | head -n -{{ etcd_backup_retention_count }} | xargs rm -rf"
+    cmd: "set -o pipefail && find . -name 'etcd-*' -type d | sort -n | head -n -{{ etcd_backup_retention_count }} | xargs rm -rf"
+    executable: /bin/bash
   when: etcd_backup_retention_count >= 0
diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml
index 7534e4176c1f38a2e9a099c638e1699a1d390d36..3fdedccac4de3eced7e7224707ffe84f51ef710c 100644
--- a/roles/etcd/tasks/configure.yml
+++ b/roles/etcd/tasks/configure.yml
@@ -8,11 +8,13 @@
   changed_when: false
   check_mode: no
   run_once: yes
-  when: is_etcd_master and etcd_cluster_setup
+  when:
+    - is_etcd_master
+    - etcd_cluster_setup
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -27,11 +29,13 @@
   changed_when: false
   check_mode: no
   run_once: yes
-  when: is_etcd_master and etcd_events_cluster_setup
+  when:
+    - is_etcd_master
+    - etcd_events_cluster_setup
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -96,7 +100,7 @@
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -119,7 +123,7 @@
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -135,7 +139,7 @@
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -151,7 +155,7 @@
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index 9f01b1ffb3f745468520a056f220198a7233da11..7beda4d7895698496f3feacb4256b4a8b6befa55 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -41,16 +41,18 @@
 - name: Gen_certs | run cert generation script for etcd and kube control plane nodes
   command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
   environment:
-    - MASTERS: "{% for m in groups['etcd'] %}
-                  {% if gen_master_certs[m] %}
-                    {{ m }}
-                  {% endif %}
-                {% endfor %}"
-    - HOSTS: "{% for h in groups['kube_control_plane'] %}
-                {% if gen_node_certs[h] %}
-                    {{ h }}
-                {% endif %}
-              {% endfor %}"
+    MASTERS: |-
+      {% for m in groups['etcd'] %}
+        {% if gen_master_certs[m] %}
+          {{ m }}
+        {% endif %}
+      {% endfor %}
+    HOSTS: |-
+      {% for h in groups['kube_control_plane'] %}
+        {% if gen_node_certs[h] %}
+          {{ h }}
+        {% endif %}
+      {% endfor %}
   run_once: yes
   delegate_to: "{{ groups['etcd'][0] }}"
   when: gen_certs|default(false)
@@ -59,11 +61,12 @@
 - name: Gen_certs | run cert generation script for all clients
   command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
   environment:
-    - HOSTS: "{% for h in groups['k8s_cluster'] %}
-                {% if gen_node_certs[h] %}
-                    {{ h }}
-                {% endif %}
-              {% endfor %}"
+    HOSTS: |-
+      {% for h in groups['k8s_cluster'] %}
+        {% if gen_node_certs[h] %}
+          {{ h }}
+        {% endif %}
+      {% endfor %}
   run_once: yes
   delegate_to: "{{ groups['etcd'][0] }}"
   when:
diff --git a/roles/etcd/tasks/gen_nodes_certs_script.yml b/roles/etcd/tasks/gen_nodes_certs_script.yml
index d176e01aa9aab5cb8520939b01602edcb5869d35..73e64c29f9910387b13e2323b8f96ecea07ff03f 100644
--- a/roles/etcd/tasks/gen_nodes_certs_script.yml
+++ b/roles/etcd/tasks/gen_nodes_certs_script.yml
@@ -17,7 +17,6 @@
   shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs|join(' ') }} | base64 --wrap=0"
   args:
     executable: /bin/bash
-    warn: false
   no_log: "{{ not (unsafe_show_logs|bool) }}"
   register: etcd_node_certs
   check_mode: no
diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml
index 8336f1a40318e39887f39fc2f917d6dc9346d8b8..4bdd225fb078d27014adb22b04dd35d4a3340ac0 100644
--- a/roles/etcd/tasks/join_etcd-events_member.yml
+++ b/roles/etcd/tasks/join_etcd-events_member.yml
@@ -1,12 +1,12 @@
 ---
 - name: Join Member | Add member to etcd-events cluster  # noqa 301 305
-  shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}"
+  command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}"
   register: member_add_result
   until: member_add_result.rc == 0
   retries: "{{ etcd_retries }}"
   delay: "{{ retry_stagger | random + 3 }}"
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -34,7 +34,7 @@
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml
index 22440394f2dc8e9d975eb4bf301f43ae546cbc6f..6bc28f8610f4fdf41c54cda4f4ddaa46786d7c1d 100644
--- a/roles/etcd/tasks/join_etcd_member.yml
+++ b/roles/etcd/tasks/join_etcd_member.yml
@@ -1,13 +1,13 @@
 ---
 - name: Join Member | Add member to etcd cluster  # noqa 301 305
-  shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}"
+  command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}"
   register: member_add_result
   until: member_add_result.rc == 0 or 'Peer URLs already exists' in member_add_result.stderr
   failed_when: member_add_result.rc != 0 and 'Peer URLs already exists' not in member_add_result.stderr
   retries: "{{ etcd_retries }}"
   delay: "{{ retry_stagger | random + 3 }}"
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -38,7 +38,7 @@
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
diff --git a/roles/kubernetes-apps/argocd/tasks/main.yml b/roles/kubernetes-apps/argocd/tasks/main.yml
index dd66d7375ad36b68e5c65540f3fe60c67888b840..e11f0976bf3093dd90a8e2cc02ca43c457a16c28 100644
--- a/roles/kubernetes-apps/argocd/tasks/main.yml
+++ b/roles/kubernetes-apps/argocd/tasks/main.yml
@@ -5,7 +5,7 @@
     download: "{{ download_defaults | combine(downloads.yq) }}"
 
 - name: Kubernetes Apps | Copy yq binary from download dir
-  synchronize:
+  ansible.posix.synchronize:
     src: "{{ downloads.yq.dest }}"
     dest: "{{ bin_dir }}/yq"
     compress: no
@@ -46,7 +46,7 @@
     - "inventory_hostname == groups['kube_control_plane'][0]"
 
 - name: Kubernetes Apps | Copy ArgoCD remote manifests from download dir
-  synchronize:
+  ansible.posix.synchronize:
     src: "{{ local_release_dir }}/{{ item.file }}"
     dest: "{{ kube_config_dir }}/{{ item.file }}"
     compress: no
diff --git a/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml b/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml
index 2224ae5f1ca5ac367e8ed49d6d0e33c107cc018c..528519beed01a635be79386436534f85182be2f5 100644
--- a/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml
+++ b/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 
-- include: credentials-check.yml
+- import_tasks: credentials-check.yml
 
 - name: "OCI Cloud Controller | Generate Cloud Provider Configuration"
   template:
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index f0e07018c16cec75306c2d35758fe38bf53fd0fd..643c0ce454b3a635efe6624111c54d8c59af5861 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -66,7 +66,10 @@
     - cloud_provider == 'oci'
 
 - name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
-  copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml mode=0640
+  copy:
+    src: k8s-cluster-critical-pc.yml
+    dest: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
+    mode: 0640
   when: inventory_hostname == groups['kube_control_plane']|last
 
 - name: PriorityClass | Create k8s-cluster-critical
diff --git a/roles/kubernetes/control-plane/handlers/main.yml b/roles/kubernetes/control-plane/handlers/main.yml
index e6bc321e20a534f4362006fa477da1a2cd0674bb..d5f17963ffd646781396b73b8a656aa5f96d0efb 100644
--- a/roles/kubernetes/control-plane/handlers/main.yml
+++ b/roles/kubernetes/control-plane/handlers/main.yml
@@ -44,7 +44,9 @@
     state: restarted
 
 - name: Master | Remove apiserver container docker
-  shell: docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f
+  shell: "set -o pipefail && docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f"
+  args:
+    executable: /bin/bash
   register: remove_apiserver_container
   retries: 10
   until: remove_apiserver_container.rc == 0
@@ -52,7 +54,9 @@
   when: container_manager == "docker"
 
 - name: Master | Remove apiserver container containerd/crio
-  shell: "{{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  args:
+    executable: /bin/bash
   register: remove_apiserver_container
   retries: 10
   until: remove_apiserver_container.rc == 0
@@ -60,7 +64,9 @@
   when: container_manager in ['containerd', 'crio']
 
 - name: Master | Remove scheduler container docker
-  shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  args:
+    executable: /bin/bash
   register: remove_scheduler_container
   retries: 10
   until: remove_scheduler_container.rc == 0
@@ -68,7 +74,9 @@
   when: container_manager == "docker"
 
 - name: Master | Remove scheduler container containerd/crio
-  shell: "{{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  args:
+    executable: /bin/bash
   register: remove_scheduler_container
   retries: 10
   until: remove_scheduler_container.rc == 0
@@ -76,7 +84,9 @@
   when: container_manager in ['containerd', 'crio']
 
 - name: Master | Remove controller manager container docker
-  shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  args:
+    executable: /bin/bash
   register: remove_cm_container
   retries: 10
   until: remove_cm_container.rc == 0
@@ -84,7 +94,9 @@
   when: container_manager == "docker"
 
 - name: Master | Remove controller manager container containerd/crio
-  shell: "{{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  args:
+    executable: /bin/bash
   register: remove_cm_container
   retries: 10
   until: remove_cm_container.rc == 0
diff --git a/roles/kubernetes/node/tasks/facts.yml b/roles/kubernetes/node/tasks/facts.yml
index 97d52e8c3ee0a939e4b213f6c583625e317dcf71..d68d5bdde8303e97e72c37213dbd90b35405dbe9 100644
--- a/roles/kubernetes/node/tasks/facts.yml
+++ b/roles/kubernetes/node/tasks/facts.yml
@@ -1,7 +1,9 @@
 ---
 - block:
   - name: look up docker cgroup driver
-    shell: "docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'"
+    shell: "set -o pipefail && docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'"
+    args:
+      executable: /bin/bash
     register: docker_cgroup_driver_result
     changed_when: false
     check_mode: no
@@ -13,7 +15,9 @@
 
 - block:
   - name: look up crio cgroup driver
-    shell: "{{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'"
+    shell: "set -o pipefail && {{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'"
+    args:
+      executable: /bin/bash
     register: crio_cgroup_driver_result
     changed_when: false
 
@@ -40,7 +44,6 @@
   when: kubelet_cgroup_driver == 'cgroupfs'
 
 - name: set kubelet_config_extra_args options when cgroupfs is used
-  vars:
   set_fact:
     kubelet_config_extra_args: "{{ kubelet_config_extra_args | combine(kubelet_config_extra_args_cgroupfs) }}"
   when: kubelet_cgroup_driver == 'cgroupfs'
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 59dc3007a540fccbbfdbe087bd3900a721cee557..99babd64f7232f64d376df17d7ccc67f0ce2d05a 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -41,7 +41,7 @@
     - haproxy
 
 - name: Ensure nodePort range is reserved
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv4.ip_local_reserved_ports
     value: "{{ kube_apiserver_node_port_range }}"
     sysctl_set: yes
@@ -68,7 +68,7 @@
     mode: 0755
 
 - name: Enable br_netfilter module
-  modprobe:
+  community.general.modprobe:
     name: br_netfilter
     state: present
   when: modinfo_br_netfilter.rc == 0
@@ -89,7 +89,7 @@
   register: sysctl_bridge_nf_call_iptables
 
 - name: Enable bridge-nf-call tables
-  sysctl:
+  ansible.posix.sysctl:
     name: "{{ item }}"
     state: present
     sysctl_file: "{{ sysctl_file_path }}"
@@ -102,7 +102,7 @@
     - net.bridge.bridge-nf-call-ip6tables
 
 - name: Modprobe Kernel Module for IPVS
-  modprobe:
+  community.general.modprobe:
     name: "{{ item }}"
     state: present
   with_items:
@@ -115,7 +115,7 @@
     - kube-proxy
 
 - name: Modprobe nf_conntrack_ipv4
-  modprobe:
+  community.general.modprobe:
     name: nf_conntrack_ipv4
     state: present
   register: modprobe_nf_conntrack_ipv4
diff --git a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml
index 0212530f4f1c0bde34a1e1269c4a126b47452fd1..7cb0c318502080889e99d5e769a580feeeedc9bb 100644
--- a/roles/kubernetes/preinstall/handlers/main.yml
+++ b/roles/kubernetes/preinstall/handlers/main.yml
@@ -68,7 +68,9 @@
   when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
 
 - name: Preinstall | restart kube-controller-manager docker
-  shell: "{{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  args:
+    executable: /bin/bash
   when:
     - container_manager == "docker"
     - inventory_hostname in groups['kube_control_plane']
@@ -77,7 +79,9 @@
     - kube_controller_set.stat.exists
 
 - name: Preinstall | restart kube-controller-manager crio/containerd
-  shell: "{{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  args:
+    executable: /bin/bash
   register: preinstall_restart_controller_manager
   retries: 10
   delay: 1
@@ -90,7 +94,9 @@
     - kube_controller_set.stat.exists
 
 - name: Preinstall | restart kube-apiserver docker
-  shell: "{{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-apiserver* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-apiserver* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  args:
+    executable: /bin/bash
   when:
     - container_manager == "docker"
     - inventory_hostname in groups['kube_control_plane']
@@ -99,7 +105,9 @@
     - kube_apiserver_set.stat.exists
 
 - name: Preinstall | restart kube-apiserver crio/containerd
-  shell: "{{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
+  args:
+    executable: /bin/bash
   register: preinstall_restart_apiserver
   retries: 10
   until: preinstall_restart_apiserver.rc == 0
diff --git a/roles/kubernetes/preinstall/tasks/0010-swapoff.yml b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
index 997a18c851093376a6a89854300f628eea403384..ce574f86cce06af7b14d680b41f3dccc5bb0245b 100644
--- a/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
+++ b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
@@ -1,6 +1,6 @@
 ---
 - name: Remove swapfile from /etc/fstab
-  mount:
+  ansible.posix.mount:
     name: "{{ item }}"
     fstype: swap
     state: absent
diff --git a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml
index 7249ac898d14a440c1a406379a3a3e520a4632c6..9ad5f7d107c6d5453cb9da12516aff8d6f3ce38d 100644
--- a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml
+++ b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml
@@ -1,6 +1,6 @@
 ---
 - name: NetworkManager | Add nameservers to NM configuration
-  ini_file:
+  community.general.ini_file:
     path: /etc/NetworkManager/conf.d/dns.conf
     section: global-dns-domain-*
     option: servers
@@ -15,7 +15,7 @@
   when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0)
 
 - name: NetworkManager | Add DNS search to NM configuration
-  ini_file:
+  community.general.ini_file:
     path: /etc/NetworkManager/conf.d/dns.conf
     section: global-dns
     option: searches
@@ -25,7 +25,7 @@
   notify: Preinstall | update resolvconf for networkmanager
 
 - name: NetworkManager | Add DNS options to NM configuration
-  ini_file:
+  community.general.ini_file:
     path: /etc/NetworkManager/conf.d/dns.conf
     section: global-dns
     option: options
diff --git a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
index d4fa45b8bed84dec55b29e97942ed3d9160e7400..2bf5523515ff075570562f538bcfb67325243359 100644
--- a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
+++ b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
@@ -12,7 +12,7 @@
   register: slc
 
 - name: Set selinux policy
-  selinux:
+  ansible.posix.selinux:
     policy: targeted
     state: "{{ preinstall_selinux_state }}"
   when:
@@ -71,7 +71,7 @@
     mode: 0755
 
 - name: Enable ip forwarding
-  sysctl:
+  ansible.posix.sysctl:
     sysctl_file: "{{ sysctl_file_path }}"
     name: net.ipv4.ip_forward
     value: "1"
@@ -79,7 +79,7 @@
     reload: yes
 
 - name: Enable ipv6 forwarding
-  sysctl:
+  ansible.posix.sysctl:
     sysctl_file: "{{ sysctl_file_path }}"
     name: net.ipv6.conf.all.forwarding
     value: "1"
@@ -97,7 +97,7 @@
   ignore_errors: true  # noqa ignore-errors
 
 - name: Set fs.may_detach_mounts if needed
-  sysctl:
+  ansible.posix.sysctl:
     sysctl_file: "{{ sysctl_file_path }}"
     name: fs.may_detach_mounts
     value: 1
@@ -106,7 +106,7 @@
   when: fs_may_detach_mounts.stat.exists | d(false)
 
 - name: Ensure kube-bench parameters are set
-  sysctl:
+  ansible.posix.sysctl:
     sysctl_file: "{{ sysctl_file_path }}"
     name: "{{ item.name }}"
     value: "{{ item.value }}"
@@ -122,14 +122,14 @@
   when: kubelet_protect_kernel_defaults|bool
 
 - name: Check dummy module
-  modprobe:
+  community.general.modprobe:
     name: dummy
     state: present
     params: 'numdummies=0'
   when: enable_nodelocaldns
 
 - name: Set additional sysctl variables
-  sysctl:
+  ansible.posix.sysctl:
     sysctl_file: "{{ sysctl_file_path }}"
     name: "{{ item.name }}"
     value: "{{ item.value }}"
diff --git a/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml b/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml
index b6c6b2e957fabb6ae7727bd4534c6b40065046d8..c2e42366d61027e1215db7db026544ed444ca2d3 100644
--- a/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml
+++ b/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml
@@ -78,7 +78,7 @@
     - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
 
 - name: Set timezone
-  timezone:
+  community.general.timezone:
     name: "{{ ntp_timezone }}"
   when:
     - ntp_timezone
diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml
index aa1cf214a5f5f3b4585207e5d1bbf598e401ccd6..e80e56d6fe5440a28e9f88c1c0182b2d908cc746 100644
--- a/roles/kubernetes/tokens/tasks/gen_tokens.yml
+++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml
@@ -45,7 +45,6 @@
 - name: Gen_tokens | Gather tokens
   shell: "set -o pipefail && tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0"
   args:
-    warn: false
     executable: /bin/bash
   register: tokens_data
   check_mode: no
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 333446e6011d82af183afddfb47cba477705137c..ec5b8e6a3235e673ce9bd7d7ac1a160244990fa2 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -33,13 +33,13 @@ kubeadm_init_timeout: 300s
 kubeadm_init_phases_skip_default: [ "addon/coredns" ]
 kubeadm_init_phases_skip: >-
   {%- if kube_network_plugin == 'kube-router' and (kube_router_run_service_proxy is defined and kube_router_run_service_proxy) -%}
-  {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ]
+  {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }}
   {%- elif kube_network_plugin == 'cilium' and (cilium_kube_proxy_replacement is defined and cilium_kube_proxy_replacement == 'strict') -%}
-  {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ]
+  {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }}
   {%- elif kube_network_plugin == 'calico' and (calico_bpf_enabled is defined and calico_bpf_enabled) -%}
-  {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ]
+  {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }}
   {%- elif kube_proxy_remove is defined and kube_proxy_remove -%}
-  {{ kubeadm_init_phases_skip_default }} + [ "addon/kube-proxy" ]
+  {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }}
   {%- else -%}
   {{ kubeadm_init_phases_skip_default }}
   {%- endif -%}
diff --git a/roles/network_plugin/calico/handlers/main.yml b/roles/network_plugin/calico/handlers/main.yml
index b4b7af860f81ab86889f64f5657c38d87bfc1d00..fbcae3a245378176dad7e0b9d39c3d6edac649ee 100644
--- a/roles/network_plugin/calico/handlers/main.yml
+++ b/roles/network_plugin/calico/handlers/main.yml
@@ -13,14 +13,18 @@
     state: absent
 
 - name: Calico | delete calico-node docker containers
-  shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
+  args:
+    executable: /bin/bash
   register: docker_calico_node_remove
   until: docker_calico_node_remove is succeeded
   retries: 5
   when: container_manager in ["docker"]
 
 - name: Calico | delete calico-node crio/containerd containers
-  shell: '{{ bin_dir }}/crictl pods --name calico-node-* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"'
+  shell: 'set -o pipefail && {{ bin_dir }}/crictl pods --name calico-node-* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"'
+  args:
+    executable: /bin/bash
   register: crictl_calico_node_remove
   until: crictl_calico_node_remove is succeeded
   retries: 5
diff --git a/roles/network_plugin/cilium/tasks/install.yml b/roles/network_plugin/cilium/tasks/install.yml
index 06e054a28224f32009443f76da3ec6aaf4d09ba0..7678e7d4aa49bb80292cf6ac330dd6d1c814960b 100644
--- a/roles/network_plugin/cilium/tasks/install.yml
+++ b/roles/network_plugin/cilium/tasks/install.yml
@@ -1,6 +1,6 @@
 ---
 - name: Cilium | Ensure BPFFS mounted
-  mount:
+  ansible.posix.mount:
     fstype: bpf
     path: /sys/fs/bpf
     src: bpffs
diff --git a/roles/network_plugin/kube-router/handlers/main.yml b/roles/network_plugin/kube-router/handlers/main.yml
index 7bdfc5d42930eef6180f0fe2e1c8e069d11958ac..c0ddb33addc339d2659453fc78940bd099db84f9 100644
--- a/roles/network_plugin/kube-router/handlers/main.yml
+++ b/roles/network_plugin/kube-router/handlers/main.yml
@@ -6,14 +6,18 @@
     - Kube-router | delete kube-router crio/containerd containers
 
 - name: Kube-router | delete kube-router docker containers
-  shell: "{{ docker_bin_dir }}/docker ps -af name=k8s_POD_kube-router* -q | xargs --no-run-if-empty docker rm -f"
+  shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_POD_kube-router* -q | xargs --no-run-if-empty docker rm -f"
+  args:
+    executable: /bin/bash
   register: docker_kube_router_remove
   until: docker_kube_router_remove is succeeded
   retries: 5
   when: container_manager in ["docker"]
 
 - name: Kube-router | delete kube-router crio/containerd containers
-  shell: '{{ bin_dir }}/crictl pods --name kube-router* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"'
+  shell: 'set -o pipefail && {{ bin_dir }}/crictl pods --name kube-router* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"'
+  args:
+    executable: /bin/bash
   register: crictl_kube_router_remove
   until: crictl_kube_router_remove is succeeded
   retries: 5
diff --git a/roles/network_plugin/kube-router/tasks/main.yml b/roles/network_plugin/kube-router/tasks/main.yml
index 4cc078ae7af128b98a2a03964fd3f6d605c7b3d4..23b3af964e144184d5a3acf8072510447ea7f662 100644
--- a/roles/network_plugin/kube-router/tasks/main.yml
+++ b/roles/network_plugin/kube-router/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: kube-router | Create annotations
-  include: annotate.yml
+  import_tasks: annotate.yml
   tags: annotate
 
 - name: kube-router | Create config directory
diff --git a/roles/network_plugin/macvlan/tasks/main.yml b/roles/network_plugin/macvlan/tasks/main.yml
index bdc2dbc267550f6a944310878f4d4bf52ab4fb15..2b486cce206527e8d51fcb8b1c2da5364f423822 100644
--- a/roles/network_plugin/macvlan/tasks/main.yml
+++ b/roles/network_plugin/macvlan/tasks/main.yml
@@ -7,7 +7,7 @@
 
 - name: Macvlan | set node_pod_cidr
   set_fact:
-    node_pod_cidr={{ node_pod_cidr_cmd.stdout }}
+    node_pod_cidr: "{{ node_pod_cidr_cmd.stdout }}"
 
 - name: Macvlan | Retrieve default gateway network interface
   become: false
@@ -17,7 +17,7 @@
 
 - name: Macvlan | set node_default_gateway_interface
   set_fact:
-    node_default_gateway_interface={{ node_default_gateway_interface_cmd.stdout | trim }}
+    node_default_gateway_interface: "{{ node_default_gateway_interface_cmd.stdout | trim }}"
 
 - name: Macvlan | Install network gateway interface on debian
   template:
@@ -101,7 +101,7 @@
     mode: 0644
 
 - name: Enable net.ipv4.conf.all.arp_notify in sysctl
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv4.conf.all.arp_notify
     value: 1
     sysctl_set: yes
diff --git a/roles/network_plugin/multus/tasks/main.yml b/roles/network_plugin/multus/tasks/main.yml
index 9bf1842bedb39c0a8e28d48d6fcd1687fe0fa126..ab76268a5680a3db0728259aa6364074714f2dfc 100644
--- a/roles/network_plugin/multus/tasks/main.yml
+++ b/roles/network_plugin/multus/tasks/main.yml
@@ -20,6 +20,7 @@
   template:
     src: multus-daemonset.yml.j2
     dest: "{{ kube_config_dir }}/{{ item.file }}"
+    mode: 0644
   with_items:
     - {name: multus-daemonset-containerd, file: multus-daemonset-containerd.yml, type: daemonset, engine: containerd }
     - {name: multus-daemonset-docker, file: multus-daemonset-docker.yml, type: daemonset, engine: docker }
diff --git a/roles/recover_control_plane/control-plane/tasks/main.yml b/roles/recover_control_plane/control-plane/tasks/main.yml
index 4a4e3eb7ec8dc34ff676cc858dba81e797efc5d4..ec50f3ffdbb106cfa9290747d6670e235bff2284 100644
--- a/roles/recover_control_plane/control-plane/tasks/main.yml
+++ b/roles/recover_control_plane/control-plane/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Wait for apiserver
   command: "{{ kubectl }} get nodes"
   environment:
-    - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
+    KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   register: apiserver_is_ready
   until: apiserver_is_ready.rc == 0
   retries: 6
@@ -13,7 +13,7 @@
 - name: Delete broken kube_control_plane nodes from cluster
   command: "{{ kubectl }} delete node {{ item }}"
   environment:
-    - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
+    KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   with_items: "{{ groups['broken_kube_control_plane'] }}"
   register: delete_broken_kube_masters
   failed_when: false
diff --git a/roles/recover_control_plane/etcd/tasks/main.yml b/roles/recover_control_plane/etcd/tasks/main.yml
index 45e2c65e48ad460df28db3c4ca60ebe231976ced..1944f50d274502677c0216d823b71ecc2d134f2a 100644
--- a/roles/recover_control_plane/etcd/tasks/main.yml
+++ b/roles/recover_control_plane/etcd/tasks/main.yml
@@ -6,25 +6,25 @@
   changed_when: false
   check_mode: no
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
 
 - name: Set healthy fact
   set_fact:
     healthy: "{{ etcd_endpoint_health.stderr is match('Error: unhealthy cluster') }}"
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
 
 - name: Set has_quorum fact
   set_fact:
     has_quorum: "{{ etcd_endpoint_health.stdout_lines | select('match', '.*is healthy.*') | list | length >= etcd_endpoint_health.stderr_lines | select('match', '.*is unhealthy.*') | list | length }}"
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
 
 - include_tasks: recover_lost_quorum.yml
   when:
@@ -39,7 +39,7 @@
   with_items: "{{ groups['broken_etcd'] }}"
   ignore_errors: true  # noqa ignore-errors
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
     - has_quorum
 
 - name: Delete old certificates
@@ -56,7 +56,7 @@
   loop: "{{ delete_old_cerificates.results }}"
   changed_when: false
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
     - "item.rc != 0 and not 'No such file or directory' in item.stderr"
 
 - name: Get etcd cluster members
@@ -65,20 +65,20 @@
   changed_when: false
   check_mode: no
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
     - not healthy
     - has_quorum
 
 - name: Remove broken cluster members
   command: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}"
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
@@ -87,7 +87,7 @@
     - "{{ groups['broken_etcd'] }}"
     - "{{ member_list.stdout_lines }}"
   when:
-    - groups['broken_etcd']
+    - inventory_hostname in groups['broken_etcd']
     - not healthy
     - has_quorum
     - hostvars[item[0]]['etcd_member_name'] == item[1].replace(' ','').split(',')[2]
diff --git a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
index 1ecc90fef7e47e0437050886fa2166ac04117150..86096fed911afb9ab52316710021d1d161d68f9e 100644
--- a/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
+++ b/roles/recover_control_plane/etcd/tasks/recover_lost_quorum.yml
@@ -2,11 +2,11 @@
 - name: Save etcd snapshot
   command: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db"
   environment:
-    - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
-    - ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
-    - ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
-    - ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses.split(',') | first }}"
-    - ETCDCTL_API: 3
+    ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
+    ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses.split(',') | first }}"
+    ETCDCTL_API: "3"
   when: etcd_snapshot is not defined
 
 - name: Transfer etcd snapshot to host
@@ -29,11 +29,11 @@
 - name: Restore etcd snapshot  # noqa 301 305
   shell: "{{ bin_dir }}/etcdctl snapshot restore /tmp/snapshot.db --name {{ etcd_member_name }} --initial-cluster {{ etcd_member_name }}={{ etcd_peer_url }} --initial-cluster-token k8s_etcd --initial-advertise-peer-urls {{ etcd_peer_url }} --data-dir {{ etcd_data_dir }}"
   environment:
-    - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
-    - ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
-    - ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
-    - ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
-    - ETCDCTL_API: 3
+    ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
+    ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
+    ETCDCTL_API: "3"
 
 - name: Remove etcd snapshot
   file:
diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml b/roles/remove-node/remove-etcd-node/tasks/main.yml
index e9ef0cf6bf49d6adc7393fc51cc80ba54d6da038..f7729ea7902c0b47a3ec1f9f32b8ea0165124162 100644
--- a/roles/remove-node/remove-etcd-node/tasks/main.yml
+++ b/roles/remove-node/remove-etcd-node/tasks/main.yml
@@ -26,7 +26,9 @@
     - inventory_hostname in groups['etcd']
 
 - name: Lookup etcd member id
-  shell: "{{ bin_dir }}/etcdctl member list | grep {{ node_ip }} | cut -d, -f1"
+  shell: "set -o pipefail && {{ bin_dir }}/etcdctl member list | grep {{ node_ip }} | cut -d, -f1"
+  args:
+    executable: /bin/bash
   register: etcd_member_id
   ignore_errors: true  # noqa ignore-errors
   changed_when: false
@@ -34,7 +36,7 @@
   tags:
     - facts
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '.pem' }}"
     ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '-key.pem' }}"
     ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}"
@@ -45,7 +47,7 @@
 - name: Remove etcd member from cluster
   command: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '.pem' }}"
     ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '-key.pem' }}"
     ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}"
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 729be0908f4ba6284d14ac680278a6635a5f14ee..f6394c366444efacb7bdb0c895c1459ac4097cb5 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -178,7 +178,6 @@
   shell: set -o pipefail && mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
   args:
     executable: /bin/bash
-    warn: false
   check_mode: no
   register: mounted_dirs
   failed_when: false
@@ -279,6 +278,7 @@
     path: "{{ filedir_path }}"
     state: touch
     attributes: "-i"
+    mode: "0644"
   loop: "{{ var_lib_kubelet_files_dirs_w_attrs.stdout_lines|select('search', 'Immutable')|list }}"
   loop_control:
     loop_var: file_dir_line
diff --git a/run.rc b/run.rc
index f87ad4e0d8167bc38869d7f766dba344fd7908d0..570f0dd80a59116c8bd5a173c073a893606e95b0 100644
--- a/run.rc
+++ b/run.rc
@@ -7,6 +7,7 @@ pip install wheel
 pip install --upgrade setuptools
 pip install -r requirements.txt
 pip install -r tests/requirements.txt
+ansible-galaxy install -r tests/requirements.yml
 pre-commit install
 # prepare an inventory to test with
 INV=inventory/lab
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index 3f31217d308122b8849f5c2aaa08a221652315c6..feb309d3884ecd771e10297539d6d16f4ea21ca8 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -97,7 +97,7 @@
       - /var/log/dmesg
 
   environment:
-    ETCDCTL_API: 3
+    ETCDCTL_API: "3"
     ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
@@ -120,18 +120,22 @@
       no_log: True
 
     - name: Fetch results
-      fetch: src={{ item.name }} dest=/tmp/{{ archive_dirname }}/commands
+      fetch:
+        src: "{{ item.name }}"
+        dest: "/tmp/{{ archive_dirname }}/commands"
       with_items: "{{ commands }}"
       when: item.when | default(True)
       failed_when: false
 
     - name: Fetch logs
-      fetch: src={{ item }} dest=/tmp/{{ archive_dirname }}/logs
+      fetch:
+        src: "{{ item }}"
+        dest: "/tmp/{{ archive_dirname }}/logs"
       with_items: "{{ logs }}"
       failed_when: false
 
     - name: Pack results and logs
-      archive:
+      community.general.archive:
         path: "/tmp/{{ archive_dirname }}"
         dest: "{{ dir|default('.') }}/logs.tar.gz"
         remove: true
@@ -142,5 +146,7 @@
       run_once: true
 
     - name: Clean up collected command outputs
-      file: path={{ item.name }} state=absent
+      file:
+        path: "{{ item.name }}"
+        state: absent
       with_items: "{{ commands }}"
diff --git a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
index a0b36bebb6f6ac59f8c3525a6b4d45f21590a926..832f9dd7f108486221788e68d3f3e8d92eaf908b 100644
--- a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
+++ b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
@@ -11,6 +11,7 @@
     url: "{{ item.value.url }}"
     dest: "{{ images_dir }}/{{ item.value.filename }}"
     checksum: "{{ item.value.checksum }}"
+    mode: "0644"
   loop: "{{ images|dict2items }}"
 
 - name: Unxz compressed images
diff --git a/tests/cloud_playbooks/create-aws.yml b/tests/cloud_playbooks/create-aws.yml
index 8a03c92594a8a69f5783c9a0dc8a793c962e9a00..453c1139d7ab35b08c6ace9fa87e19082fa5d2a8 100644
--- a/tests/cloud_playbooks/create-aws.yml
+++ b/tests/cloud_playbooks/create-aws.yml
@@ -5,7 +5,7 @@
 
   tasks:
   - name: Provision a set of instances
-    ec2:
+    amazon.aws.ec2_instance:
       key_name: "{{ aws.key_name }}"
       aws_access_key: "{{ aws.access_key }}"
       aws_secret_key: "{{ aws.secret_key }}"
diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml
index 3726eb158557fc5c66db6eef1230f251aa409e0b..f95cbe5162b473d3984f1d37513be06f1c161f18 100644
--- a/tests/cloud_playbooks/create-do.yml
+++ b/tests/cloud_playbooks/create-do.yml
@@ -52,7 +52,8 @@
         test_name: "{{ test_id |regex_replace('\\.', '-') }}"
 
     - name: show vars
-      debug: msg="{{ cloud_region }}, {{ cloud_image }}"
+      debug:
+        msg: "{{ cloud_region }}, {{ cloud_image }}"
 
     - name: set instance names
       set_fact:
@@ -64,7 +65,7 @@
           {%- endif -%}
 
     - name: Manage DO instances | {{ state }}
-      digital_ocean:
+      community.digitalocean.digital_ocean:
         unique_name: yes
         api_token: "{{ lookup('env','DO_API_TOKEN') }}"
         command: "droplet"
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index f94b05bcb57ebb96505b165fe6750c8565148d11..dae55a2c5dba84fe2a103632b2a4430aeef6437c 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -46,7 +46,9 @@
       register: gce
 
     - name: Add instances to host group
-      add_host: hostname={{ item.public_ip }} groupname="waitfor_hosts"
+      add_host:
+        hostname: "{{ item.public_ip }}"
+        groupname: "waitfor_hosts"
       with_items: '{{ gce.instance_data }}'
 
     - name: Template the inventory  # noqa 404 CI inventory templates are not in role_path
diff --git a/tests/cloud_playbooks/delete-aws.yml b/tests/cloud_playbooks/delete-aws.yml
index 02f9b06c7ef70b49263306081e87c78c433820c5..e207a98445d112bbd5af453b44fa3b26b787471e 100644
--- a/tests/cloud_playbooks/delete-aws.yml
+++ b/tests/cloud_playbooks/delete-aws.yml
@@ -4,10 +4,10 @@
 
   tasks:
   - name: Gather EC2 facts
-    action: ec2_facts
+    amazon.aws.ec2_metadata_facts:
 
   - name: Terminate EC2 instances
-    ec2:
+    amazon.aws.ec2_instance:
       aws_access_key: "{{ aws_access_key }}"
       aws_secret_key: "{{ aws_secret_key }}"
       state: absent
diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
index 353f9910a345967051857db6e9d164568a215cc0..98bd05a61d4f6b7cb298095b5381b8058a37924e 100644
--- a/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
+++ b/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
@@ -25,6 +25,6 @@
   changed_when:
     - delete_namespace.rc == 0
   retries: 12
-  delay: "10"
+  delay: 10
   until:
     - delete_namespace.rc != 0
diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml
index eeb0edb799fdc350647e3a4e637797e4f7f879ea..2f5c9d897172ddaa7291c6a39d42699d3962c18f 100644
--- a/tests/cloud_playbooks/upload-logs-gcs.yml
+++ b/tests/cloud_playbooks/upload-logs-gcs.yml
@@ -21,7 +21,7 @@
         file_name: "{{ ostype }}-{{ kube_network_plugin }}-{{ commit }}-logs.tar.gz"
 
     - name: Create a bucket
-      gc_storage:
+      community.google.gc_storage:
         bucket: "{{ test_name }}"
         mode: create
         permission: public-read
@@ -46,11 +46,12 @@
       get_url:
         url: https://dl.google.com/dl/cloudsdk/channels/rapid/install_google_cloud_sdk.bash
         dest: "{{ dir }}/gcp-installer.sh"
+        mode: "0755"
 
     - name: Get gsutil tool
-      script: "{{ dir }}/gcp-installer.sh"
+      command: "{{ dir }}/gcp-installer.sh"
       environment:
-        CLOUDSDK_CORE_DISABLE_PROMPTS: 1
+        CLOUDSDK_CORE_DISABLE_PROMPTS: "1"
         CLOUDSDK_INSTALL_DIR: "{{ dir }}"
       no_log: True
       failed_when: false
@@ -63,7 +64,7 @@
       no_log: True
 
     - name: Upload collected diagnostic info
-      gc_storage:
+      community.google.gc_storage:
         bucket: "{{ test_name }}"
         mode: put
         permission: public-read
diff --git a/tests/requirements.txt b/tests/requirements.txt
index e3c4482d7f624acbf2918121365938a5203c71ad..19474ab0945b65fa03fbd365618ddf80454b5743 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -1,11 +1,11 @@
 -r ../requirements.txt
-ansible-lint==5.4.0
+ansible-lint==6.16.2
 apache-libcloud==3.7.0
 ara[server]==1.6.1
 dopy==0.3.7
 molecule==5.0.1
 molecule-plugins[vagrant]==23.4.1
 python-vagrant==1.0.0
-pytest-testinfra==7.0.0
-tox==4.5.1
-yamllint==1.31.0
+pytest-testinfra==8.1.0
+tox==4.5.2
+yamllint==1.32.0
diff --git a/tests/requirements.yml b/tests/requirements.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2bedd2359e5ab7fe13ffdf42c156baae0498c782
--- /dev/null
+++ b/tests/requirements.yml
@@ -0,0 +1,4 @@
+---
+collections:
+  - name: amazon.aws
+    version: 6.0.1
diff --git a/tests/scripts/testcases_prepare.sh b/tests/scripts/testcases_prepare.sh
index 38191cebd2a88b8de59f22b5e12ab248b4f1070f..84d0a99ba2786d35cda133f4cafe43d5ba2dd49f 100755
--- a/tests/scripts/testcases_prepare.sh
+++ b/tests/scripts/testcases_prepare.sh
@@ -3,6 +3,7 @@ set -euxo pipefail
 
 /usr/bin/python -m pip uninstall -y ansible ansible-base ansible-core
 /usr/bin/python -m pip install -r tests/requirements.txt
+ansible-galaxy install -r tests/requirements.yml
 mkdir -p /.ssh
 mkdir -p cluster-dump
 mkdir -p $HOME/.ssh
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index e2287f9e440f28828e34e320baf8645282619823..c736ac730812adc4e81b9d04497e346908f99396 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -32,7 +32,7 @@
         fail_msg: kubelet_rotate_server_certificates is {{ kubelet_rotate_server_certificates }} but no csr's found
 
     - name: Get Denied/Pending certificate signing requests
-      shell: "{{ bin_dir }}/kubectl get csr | grep -e Denied -e Pending || true"
+      shell: "set -o pipefail && {{ bin_dir }}/kubectl get csr | grep -e Denied -e Pending || true"
       register: get_csr_denied_pending
       changed_when: false
 
@@ -87,6 +87,7 @@
   - name: Run 2 agnhost pods in test ns
     shell:
       cmd: |
+        set -o pipefail
         cat <<EOF | {{ bin_dir }}/kubectl apply -f -
         apiVersion: v1
         kind: Pod
@@ -107,6 +108,7 @@
               seccompProfile:
                 type: RuntimeDefault
         EOF
+      executable: /bin/bash
     changed_when: false
     loop:
     - agnhost1
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index 8d1e5122ebfd72b73582fa0b31760ba95fa6d3a0..37cf85131e5e3b20aa0f605519e8c9bf1be94f2e 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -173,6 +173,7 @@
       # heuristics by using the cmd parameter like this:
       shell:
         cmd: |
+          set -o pipefail
           cat <<EOF | {{ bin_dir }}/kubectl create -f -
           apiVersion: "k8s.cni.cncf.io/v1"
           kind: NetworkAttachmentDefinition
@@ -196,6 +197,7 @@
             }
           }'
           EOF
+        executable: /bin/bash
       when:
         - inventory_hostname == groups['kube_control_plane'][0]
         - kube_network_plugin_multus|default(false)|bool
@@ -207,6 +209,7 @@
       # heuristics by using the cmd parameter like this:
       shell:
         cmd: |
+          set -o pipefail
           cat <<EOF | {{ bin_dir }}/kubectl create -f -
           apiVersion: v1
           kind: Pod
@@ -220,6 +223,7 @@
               command: ["/bin/bash", "-c", "sleep 2000000000000"]
               image: dougbtv/centos-network
           EOF
+        executable: /bin/bash
       when:
         - inventory_hostname == groups['kube_control_plane'][0]
         - kube_network_plugin_multus|default(false)|bool
diff --git a/tests/testcases/100_check-k8s-conformance.yml b/tests/testcases/100_check-k8s-conformance.yml
index 7b45929d789af30c17444e455e9a99992bfe6918..a64ef11de26c2d36fb50847965de132c810234dc 100644
--- a/tests/testcases/100_check-k8s-conformance.yml
+++ b/tests/testcases/100_check-k8s-conformance.yml
@@ -17,6 +17,7 @@
       get_url:
         url: "https://github.com/heptio/sonobuoy/releases/download/v{{ sonobuoy_version }}/sonobuoy_{{ sonobuoy_version }}_linux_{{ sonobuoy_arch }}.tar.gz"
         dest: /tmp/sonobuoy.tar.gz
+        mode: "0644"
 
     - name: Extract sonobuoy
       unarchive:
diff --git a/tests/testcases/roles/cluster-dump/tasks/main.yml b/tests/testcases/roles/cluster-dump/tasks/main.yml
index 96419e8a2836f5859dee08a2ec839c7d085a2b87..c8a7e2eca26b3aecf3fd22226e0f398eeb510ff6 100644
--- a/tests/testcases/roles/cluster-dump/tasks/main.yml
+++ b/tests/testcases/roles/cluster-dump/tasks/main.yml
@@ -5,7 +5,7 @@
   when: inventory_hostname in groups['kube_control_plane']
 
 - name: Compress directory cluster-dump
-  archive:
+  community.general.archive:
     path: /tmp/cluster-dump
     dest: /tmp/cluster-dump.tgz
     mode: 0644