From 36e5d742dc2b3f7984398c38009f236be7c3c065 Mon Sep 17 00:00:00 2001
From: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
Date: Wed, 26 Jul 2023 16:36:22 +0200
Subject: [PATCH] Resolve ansible-lint name errors (#10253)

* project: fix ansible-lint name

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: ignore jinja template error in names

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: capitalize ansible name

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: update notify after name capitalization

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

---------

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
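
For reviewers, the lint rules resolved here map onto plays and tasks roughly
as follows (a minimal, hypothetical before/after sketch; the real changes are
in the diff below):

    # Before: trips name[play] (unnamed play) and name[casing]
    # (task name starts with a lowercase letter)
    - hosts: localhost
      tasks:
        - name: set example fact
          set_fact:
            example: true

    # After: the play is named and task names start with an uppercase letter;
    # jinja templates inside names stay allowed because name[template] remains
    # in the skip_list
    - name: Set example fact on localhost
      hosts: localhost
      tasks:
        - name: Set example fact
          set_fact:
            example: true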
---
 .ansible-lint                                 | 13 +---
 contrib/azurerm/generate-inventory.yml        |  3 +-
 contrib/azurerm/generate-inventory_2.yml      |  3 +-
 contrib/azurerm/generate-templates.yml        |  3 +-
 contrib/dind/dind-cluster.yaml                |  6 +-
 .../dind/roles/dind-cluster/tasks/main.yaml   |  6 +-
 contrib/dind/roles/dind-host/tasks/main.yaml  |  4 +-
 contrib/kvm-setup/kvm-setup.yml               |  3 +-
 .../kvm-setup/roles/kvm-setup/tasks/main.yml  |  8 +--
 contrib/mitogen/mitogen.yml                   | 11 +--
 .../network-storage/glusterfs/glusterfs.yml   | 15 ++--
 .../roles/glusterfs/client/tasks/main.yml     |  6 +-
 .../roles/glusterfs/server/tasks/main.yml     | 12 ++--
 .../heketi/heketi-tear-down.yml               |  6 +-
 contrib/network-storage/heketi/heketi.yml     |  6 +-
 .../heketi/roles/provision/handlers/main.yml  |  2 +-
 contrib/offline/generate_list.yml             |  6 +-
 contrib/os-services/os-services.yml           |  3 +-
 .../os-services/roles/prepare/tasks/main.yml  |  3 +-
 .../migrate_openstack_provider.yml            |  6 +-
 extra_playbooks/upgrade-only-k8s.yml          |  9 ++-
 extra_playbooks/wait-for-cloud-init.yml       |  3 +-
 playbooks/ansible_version.yml                 |  3 +-
 playbooks/cluster.yml                         | 30 +++++---
 playbooks/facts.yml                           |  3 +-
 playbooks/legacy_groups.yml                   | 10 +--
 playbooks/recover_control_plane.yml           | 15 ++--
 playbooks/remove_node.yml                     | 12 ++--
 playbooks/reset.yml                           |  6 +-
 playbooks/scale.yml                           |  5 +-
 playbooks/upgrade_cluster.yml                 | 18 +++--
 roles/bastion-ssh-config/tasks/main.yml       |  4 +-
 roles/bootstrap-os/tasks/main.yml             | 27 ++++---
 .../containerd-common/tasks/main.yml          |  6 +-
 .../containerd/handlers/main.yml              |  2 +-
 .../containerd/molecule/default/prepare.yml   |  3 +-
 .../containerd/tasks/main.yml                 | 47 ++++++------
 .../containerd/tasks/reset.yml                |  8 +--
 .../cri-dockerd/handlers/main.yml             | 22 +++---
 .../cri-dockerd/molecule/default/prepare.yml  |  3 +-
 .../cri-dockerd/tasks/main.yml                |  6 +-
 .../container-engine/cri-o/handlers/main.yml  |  2 +-
 .../cri-o/molecule/default/prepare.yml        |  3 +-
 .../container-engine/cri-o/tasks/cleanup.yaml |  2 +-
 roles/container-engine/cri-o/tasks/main.yaml  | 72 +++++++++----------
 .../container-engine/crictl/tasks/crictl.yml  |  2 +-
 roles/container-engine/crictl/tasks/main.yml  |  2 +-
 roles/container-engine/crun/tasks/main.yml    |  2 +-
 .../docker-storage/tasks/main.yml             | 12 ++--
 .../container-engine/docker/handlers/main.yml |  2 +-
 roles/container-engine/docker/tasks/main.yml  | 37 +++++-----
 roles/container-engine/docker/tasks/reset.yml |  2 +-
 .../docker/tasks/set_facts_dns.yml            | 24 +++----
 .../container-engine/docker/tasks/systemd.yml | 12 ++--
 .../gvisor/molecule/default/prepare.yml       |  3 +-
 roles/container-engine/gvisor/tasks/main.yml  |  6 +-
 .../molecule/default/prepare.yml              |  3 +-
 .../kata-containers/tasks/main.yml            | 14 ++--
 roles/container-engine/nerdctl/tasks/main.yml |  8 +--
 roles/container-engine/runc/tasks/main.yml    | 10 +--
 roles/container-engine/skopeo/tasks/main.yml  |  8 +--
 .../validate-container-engine/tasks/main.yml  |  4 +-
 .../youki/molecule/default/prepare.yml        |  3 +-
 roles/container-engine/youki/tasks/main.yml   |  4 +-
 roles/download/tasks/check_pull_required.yml  |  6 +-
 roles/download/tasks/download_container.yml   | 22 +++---
 roles/download/tasks/download_file.yml        | 29 ++++----
 roles/download/tasks/extract_file.yml         |  2 +-
 roles/download/tasks/main.yml                 |  6 +-
 roles/download/tasks/prep_download.yml        | 16 ++---
 roles/download/tasks/prep_kubeadm_images.yml  | 18 ++---
 roles/download/tasks/set_container_facts.yml  | 10 +--
 roles/etcd/handlers/main.yml                  | 34 ++++-----
 roles/etcd/tasks/configure.yml                |  3 +-
 roles/etcd/tasks/gen_certs_script.yml         | 14 ++--
 roles/etcd/tasks/install_docker.yml           |  8 ++-
 roles/etcd/tasks/install_host.yml             |  8 +--
 roles/etcd/tasks/join_etcd-events_member.yml  |  3 +-
 roles/etcd/tasks/join_etcd_member.yml         |  3 +-
 roles/etcd/tasks/main.yml                     | 28 +++++---
 roles/etcd/tasks/refresh_config.yml           |  4 +-
 roles/etcdctl/tasks/main.yml                  |  6 +-
 .../cloud_controller/oci/tasks/main.yml       |  3 +-
 .../cluster_roles/tasks/main.yml              |  3 +-
 .../container_runtimes/crun/tasks/main.yaml   |  4 +-
 .../container_runtimes/gvisor/tasks/main.yaml |  8 +--
 .../container_runtimes/youki/tasks/main.yaml  |  4 +-
 .../csi_driver/azuredisk/tasks/main.yml       |  3 +-
 .../csi_driver/cinder/tasks/main.yml          |  3 +-
 .../csi_driver/vsphere/tasks/main.yml         | 13 ++--
 .../openstack/tasks/main.yml                  |  3 +-
 .../vsphere/tasks/main.yml                    |  3 +-
 .../network_plugin/kube-router/tasks/main.yml |  4 +-
 .../snapshot-controller/tasks/main.yml        |  2 +-
 roles/kubernetes/client/tasks/main.yml        |  2 +-
 .../control-plane/tasks/kubeadm-secondary.yml |  2 +-
 .../control-plane/tasks/kubeadm-setup.yml     | 40 +++++------
 .../control-plane/tasks/kubeadm-upgrade.yml   | 10 +--
 roles/kubernetes/control-plane/tasks/main.yml |  6 +-
 roles/kubernetes/kubeadm/tasks/main.yml       |  4 +-
 .../azure-credential-check.yml                | 34 ++++-----
 .../openstack-credential-check.yml            | 12 ++--
 .../vsphere-credential-check.yml              |  2 +-
 roles/kubernetes/node/tasks/facts.yml         | 24 ++++---
 roles/kubernetes/node/tasks/install.yml       |  4 +-
 roles/kubernetes/node/tasks/kubelet.yml       |  2 +-
 .../node/tasks/loadbalancer/haproxy.yml       | 10 +--
 .../node/tasks/loadbalancer/kube-vip.yml      |  4 +-
 .../node/tasks/loadbalancer/nginx-proxy.yml   | 10 +--
 roles/kubernetes/node/tasks/main.yml          | 24 ++++---
 .../preinstall/tasks/0010-swapoff.yml         |  2 +-
 .../preinstall/tasks/0020-set_facts.yml       | 61 ++++++++--------
 .../preinstall/tasks/0040-verify-settings.yml |  2 +-
 .../preinstall/tasks/0060-resolvconf.yml      |  6 +-
 .../tasks/0063-networkmanager-dns.yml         |  2 +-
 .../preinstall/tasks/0070-system-packages.yml |  3 +-
 .../tasks/0120-growpart-azure-centos-7.yml    | 10 +--
 roles/kubernetes/preinstall/tasks/main.yml    | 45 ++++++++----
 roles/kubernetes/tokens/tasks/main.yml        |  6 +-
 .../kubespray-defaults/tasks/fallback_ips.yml |  4 +-
 roles/kubespray-defaults/tasks/main.yaml      |  4 +-
 roles/network_plugin/calico/handlers/main.yml |  6 +-
 .../calico/rr/tasks/update-node.yml           |  3 +-
 roles/network_plugin/calico/tasks/install.yml | 21 ++++--
 roles/network_plugin/calico/tasks/main.yml    |  9 ++-
 roles/network_plugin/calico/tasks/pre.yml     |  3 +-
 roles/network_plugin/calico/tasks/reset.yml   | 10 +--
 roles/network_plugin/cilium/tasks/main.yml    |  9 ++-
 roles/network_plugin/cilium/tasks/reset.yml   |  2 +-
 .../cilium/tasks/reset_iface.yml              |  4 +-
 roles/network_plugin/flannel/tasks/reset.yml  |  8 +--
 .../kube-router/handlers/main.yml             |  2 +-
 .../kube-router/tasks/annotate.yml            |  6 +-
 .../network_plugin/kube-router/tasks/main.yml | 22 +++---
 .../kube-router/tasks/reset.yml               | 10 +--
 roles/network_plugin/macvlan/tasks/main.yml   |  6 +-
 roles/network_plugin/ovn4nfv/tasks/main.yml   |  4 +-
 .../recover_control_plane/etcd/tasks/main.yml |  3 +-
 roles/remove-node/post-remove/tasks/main.yml  |  2 +-
 roles/remove-node/pre-remove/tasks/main.yml   |  6 +-
 roles/reset/tasks/main.yml                    | 71 +++++++++---------
 roles/upgrade/post-upgrade/tasks/main.yml     |  2 +-
 scripts/collect-info.yaml                     |  5 +-
 test-infra/image-builder/cluster.yml          |  3 +-
 .../roles/kubevirt-images/tasks/main.yml      |  6 +-
 tests/cloud_playbooks/cleanup-packet.yml      |  3 +-
 tests/cloud_playbooks/create-aws.yml          |  3 +-
 tests/cloud_playbooks/create-do.yml           |  9 +--
 tests/cloud_playbooks/create-gce.yml          |  9 +--
 tests/cloud_playbooks/create-packet.yml       |  3 +-
 tests/cloud_playbooks/delete-aws.yml          |  3 +-
 tests/cloud_playbooks/delete-gce.yml          | 11 +--
 tests/cloud_playbooks/delete-packet.yml       |  3 +-
 .../roles/packet-ci/tasks/main.yml            |  9 ++-
 tests/cloud_playbooks/upload-logs-gcs.yml     |  5 +-
 tests/cloud_playbooks/wait-for-ssh.yml        |  3 +-
 tests/testcases/010_check-apiserver.yml       |  3 +-
 tests/testcases/015_check-nodes-ready.yml     |  3 +-
 tests/testcases/020_check-pods-running.yml    |  3 +-
 tests/testcases/030_check-network.yml         |  3 +-
 tests/testcases/040_check-network-adv.yml     |  6 +-
 tests/testcases/100_check-k8s-conformance.yml |  3 +-
 162 files changed, 842 insertions(+), 675 deletions(-)

diff --git a/.ansible-lint b/.ansible-lint
index 394209ac4..1410ccc42 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -22,18 +22,7 @@ skip_list:
   # (Disabled in Feb 2023)
   - 'fqcn-builtins'
 
-  # names should start with an uppercase letter
-  # (Disabled in June 2023 after ansible upgrade; FIXME)
-  - 'name[casing]'
-
-  # Everything should be named
-  # (Disabled in June 2023 after ansible upgrade; FIXME)
-  - 'name[play]'
-  - 'name[missing]'
-
-  # templates should only be at the end of 'name'
-  # (Disabled in June 2023 after ansible upgrade; FIXME)
-  - 'name[jinja]'
+  # We use templates in names
   - 'name[template]'
 
   # order of keys errors
diff --git a/contrib/azurerm/generate-inventory.yml b/contrib/azurerm/generate-inventory.yml
index 2f5373d89..01ee38662 100644
--- a/contrib/azurerm/generate-inventory.yml
+++ b/contrib/azurerm/generate-inventory.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Generate Azure inventory
+  hosts: localhost
   gather_facts: False
   roles:
     - generate-inventory
diff --git a/contrib/azurerm/generate-inventory_2.yml b/contrib/azurerm/generate-inventory_2.yml
index bec06c46d..9173e1d82 100644
--- a/contrib/azurerm/generate-inventory_2.yml
+++ b/contrib/azurerm/generate-inventory_2.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Generate Azure inventory
+  hosts: localhost
   gather_facts: False
   roles:
     - generate-inventory_2
diff --git a/contrib/azurerm/generate-templates.yml b/contrib/azurerm/generate-templates.yml
index 3d4b1ca01..f1fcb626f 100644
--- a/contrib/azurerm/generate-templates.yml
+++ b/contrib/azurerm/generate-templates.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Generate Azure templates
+  hosts: localhost
   gather_facts: False
   roles:
     - generate-templates
diff --git a/contrib/dind/dind-cluster.yaml b/contrib/dind/dind-cluster.yaml
index 3fcae1eb0..258837d08 100644
--- a/contrib/dind/dind-cluster.yaml
+++ b/contrib/dind/dind-cluster.yaml
@@ -1,9 +1,11 @@
 ---
-- hosts: localhost
+- name: Create nodes as docker containers
+  hosts: localhost
   gather_facts: False
   roles:
     - { role: dind-host }
 
-- hosts: containers
+- name: Customize each node container
+  hosts: containers
   roles:
     - { role: dind-cluster }
diff --git a/contrib/dind/roles/dind-cluster/tasks/main.yaml b/contrib/dind/roles/dind-cluster/tasks/main.yaml
index 2d74f7ea7..1cf819f68 100644
--- a/contrib/dind/roles/dind-cluster/tasks/main.yaml
+++ b/contrib/dind/roles/dind-cluster/tasks/main.yaml
@@ -1,9 +1,9 @@
 ---
-- name: set_fact distro_setup
+- name: Set distro_setup fact
   set_fact:
     distro_setup: "{{ distro_settings[node_distro] }}"
 
-- name: set_fact other distro settings
+- name: Set facts for other distro settings
   set_fact:
     distro_user: "{{ distro_setup['user'] }}"
     distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
@@ -66,7 +66,7 @@
     dest: "/etc/sudoers.d/{{ distro_user }}"
     mode: 0640
 
-- name: Add my pubkey to "{{ distro_user }}" user authorized keys
+- name: "Add my pubkey to {{ distro_user }} user authorized keys"
   ansible.posix.authorized_key:
     user: "{{ distro_user }}"
     state: present
diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml
index 030ce7266..e44047f4d 100644
--- a/contrib/dind/roles/dind-host/tasks/main.yaml
+++ b/contrib/dind/roles/dind-host/tasks/main.yaml
@@ -1,9 +1,9 @@
 ---
-- name: set_fact distro_setup
+- name: Set distro_setup fact
   set_fact:
     distro_setup: "{{ distro_settings[node_distro] }}"
 
-- name: set_fact other distro settings
+- name: Set facts for other distro settings
   set_fact:
     distro_image: "{{ distro_setup['image'] }}"
     distro_init: "{{ distro_setup['init'] }}"
diff --git a/contrib/kvm-setup/kvm-setup.yml b/contrib/kvm-setup/kvm-setup.yml
index 0496d78b7..b8d440587 100644
--- a/contrib/kvm-setup/kvm-setup.yml
+++ b/contrib/kvm-setup/kvm-setup.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Prepare hypervisor to later install kubespray VMs
+  hosts: localhost
   gather_facts: False
   become: yes
   vars:
diff --git a/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml b/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml
index fa89836d4..3e8ade645 100644
--- a/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml
+++ b/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml
@@ -22,9 +22,9 @@
     - ntp
   when: ansible_os_family == "Debian"
 
-# Create deployment user if required
-- include_tasks: user.yml
+- name: Create deployment user if required
+  include_tasks: user.yml
   when: k8s_deployment_user is defined
 
-# Set proper sysctl values
-- import_tasks: sysctl.yml
+- name: Set proper sysctl values
+  import_tasks: sysctl.yml
diff --git a/contrib/mitogen/mitogen.yml b/contrib/mitogen/mitogen.yml
index 7b93faf2f..1ccc9a99c 100644
--- a/contrib/mitogen/mitogen.yml
+++ b/contrib/mitogen/mitogen.yml
@@ -2,7 +2,8 @@
 - name: Check ansible version
   import_playbook: kubernetes_sigs.kubespray.ansible_version
 
-- hosts: localhost
+- name: Install mitogen
+  hosts: localhost
   strategy: linear
   vars:
     mitogen_version: 0.3.2
@@ -19,24 +20,24 @@
         - "{{ playbook_dir }}/plugins/mitogen"
         - "{{ playbook_dir }}/dist"
 
-    - name: download mitogen release
+    - name: Download mitogen release
       get_url:
         url: "{{ mitogen_url }}"
         dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
         validate_certs: true
         mode: 0644
 
-    - name: extract archive
+    - name: Extract archive
       unarchive:
         src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
         dest: "{{ playbook_dir }}/dist/"
 
-    - name: copy plugin
+    - name: Copy plugin
       ansible.posix.synchronize:
         src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
         dest: "{{ playbook_dir }}/plugins/mitogen"
 
-    - name: add strategy to ansible.cfg
+    - name: Add strategy to ansible.cfg
       community.general.ini_file:
         path: ansible.cfg
         mode: 0644
diff --git a/contrib/network-storage/glusterfs/glusterfs.yml b/contrib/network-storage/glusterfs/glusterfs.yml
index 79fc3aeb9..d5ade945b 100644
--- a/contrib/network-storage/glusterfs/glusterfs.yml
+++ b/contrib/network-storage/glusterfs/glusterfs.yml
@@ -1,24 +1,29 @@
 ---
-- hosts: gfs-cluster
+- name: Bootstrap hosts
+  hosts: gfs-cluster
   gather_facts: false
   vars:
     ansible_ssh_pipelining: false
   roles:
     - { role: bootstrap-os, tags: bootstrap-os}
 
-- hosts: all
+- name: Gather facts
+  hosts: all
   gather_facts: true
 
-- hosts: gfs-cluster
+- name: Install glusterfs server
+  hosts: gfs-cluster
   vars:
     ansible_ssh_pipelining: true
   roles:
     - { role: glusterfs/server }
 
-- hosts: k8s_cluster
+- name: Install glusterfs clients
+  hosts: k8s_cluster
   roles:
     - { role: glusterfs/client }
 
-- hosts: kube_control_plane[0]
+- name: Configure Kubernetes to use glusterfs
+  hosts: kube_control_plane[0]
   roles:
     - { role: kubernetes-pv }
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml
index 151ea5751..248f21efa 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml
@@ -3,10 +3,12 @@
 # hyperkube and needs to be installed as part of the system.
 
 # Setup/install tasks.
-- include_tasks: setup-RedHat.yml
+- name: Setup RedHat distros for glusterfs
+  include_tasks: setup-RedHat.yml
   when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined
 
-- include_tasks: setup-Debian.yml
+- name: Setup Debian distros for glusterfs
+  include_tasks: setup-Debian.yml
   when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined
 
 - name: Ensure Gluster mount directories exist.
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
index 64e7691bb..50f849c01 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
@@ -4,13 +4,13 @@
   include_vars: "{{ ansible_os_family }}.yml"
 
 # Install xfs package
-- name: install xfs Debian
+- name: Install xfs Debian
   apt:
     name: xfsprogs
     state: present
   when: ansible_os_family == "Debian"
 
-- name: install xfs RedHat
+- name: Install xfs RedHat
   package:
     name: xfsprogs
     state: present
@@ -23,7 +23,7 @@
     dev: "{{ disk_volume_device_1 }}"
 
 # Mount external volumes
-- name: mounting new xfs filesystem
+- name: Mount new xfs filesystem
   ansible.posix.mount:
     name: "{{ gluster_volume_node_mount_dir }}"
     src: "{{ disk_volume_device_1 }}"
@@ -31,10 +31,12 @@
     state: mounted
 
 # Setup/install tasks.
-- include_tasks: setup-RedHat.yml
+- name: Setup RedHat distros for glusterfs
+  include_tasks: setup-RedHat.yml
   when: ansible_os_family == 'RedHat'
 
-- include_tasks: setup-Debian.yml
+- name: Setup Debian distros for glusterfs
+  include_tasks: setup-Debian.yml
   when: ansible_os_family == 'Debian'
 
 - name: Ensure GlusterFS is started and enabled at boot.
diff --git a/contrib/network-storage/heketi/heketi-tear-down.yml b/contrib/network-storage/heketi/heketi-tear-down.yml
index 9e2d1f45a..e64f085cb 100644
--- a/contrib/network-storage/heketi/heketi-tear-down.yml
+++ b/contrib/network-storage/heketi/heketi-tear-down.yml
@@ -1,9 +1,11 @@
 ---
-- hosts: kube_control_plane[0]
+- name: Tear down heketi
+  hosts: kube_control_plane[0]
   roles:
     - { role: tear-down }
 
-- hosts: heketi-node
+- name: Tear down disks in heketi
+  hosts: heketi-node
   become: yes
   roles:
     - { role: tear-down-disks }
diff --git a/contrib/network-storage/heketi/heketi.yml b/contrib/network-storage/heketi/heketi.yml
index 2309267b1..bc0c4d0fb 100644
--- a/contrib/network-storage/heketi/heketi.yml
+++ b/contrib/network-storage/heketi/heketi.yml
@@ -1,9 +1,11 @@
 ---
-- hosts: heketi-node
+- name: Prepare heketi install
+  hosts: heketi-node
   roles:
     - { role: prepare }
 
-- hosts: kube_control_plane[0]
+- name: Provision heketi
+  hosts: kube_control_plane[0]
   tags:
     - "provision"
   roles:
diff --git a/contrib/network-storage/heketi/roles/provision/handlers/main.yml b/contrib/network-storage/heketi/roles/provision/handlers/main.yml
index 9e876de17..4e768adda 100644
--- a/contrib/network-storage/heketi/roles/provision/handlers/main.yml
+++ b/contrib/network-storage/heketi/roles/provision/handlers/main.yml
@@ -1,3 +1,3 @@
 ---
-- name: "stop port forwarding"
+- name: "Stop port forwarding"
   command: "killall "
diff --git a/contrib/offline/generate_list.yml b/contrib/offline/generate_list.yml
index 5442425bc..bebf34968 100644
--- a/contrib/offline/generate_list.yml
+++ b/contrib/offline/generate_list.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Collect container images for offline deployment
+  hosts: localhost
   become: no
 
   roles:
@@ -11,7 +12,8 @@
 
   tasks:
     # Generate files.list and images.list files from templates.
-    - template:
+    - name: Generate files.list and images.list from templates
+      template:
         src: ./contrib/offline/temp/{{ item }}.list.template
         dest: ./contrib/offline/temp/{{ item }}.list
         mode: 0644
diff --git a/contrib/os-services/os-services.yml b/contrib/os-services/os-services.yml
index 34c9d8c4b..eb120d4cb 100644
--- a/contrib/os-services/os-services.yml
+++ b/contrib/os-services/os-services.yml
@@ -1,4 +1,5 @@
 ---
-- hosts: all
+- name: Disable firewalld/ufw
+  hosts: all
   roles:
     - { role: prepare }
diff --git a/contrib/os-services/roles/prepare/tasks/main.yml b/contrib/os-services/roles/prepare/tasks/main.yml
index cf7262234..f7ed34137 100644
--- a/contrib/os-services/roles/prepare/tasks/main.yml
+++ b/contrib/os-services/roles/prepare/tasks/main.yml
@@ -1,5 +1,6 @@
 ---
-- block:
+- name: Disable firewalld and ufw
+  block:
   - name: List services
     service_facts:
 
diff --git a/extra_playbooks/migrate_openstack_provider.yml b/extra_playbooks/migrate_openstack_provider.yml
index 14a6c2769..a82a58710 100644
--- a/extra_playbooks/migrate_openstack_provider.yml
+++ b/extra_playbooks/migrate_openstack_provider.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: kube_node:kube_control_plane
+- name: Remove old cloud provider config
+  hosts: kube_node:kube_control_plane
   tasks:
     - name: Remove old cloud provider config
       file:
@@ -7,7 +8,8 @@
         state: absent
       with_items:
         - /etc/kubernetes/cloud_config
-- hosts: kube_control_plane[0]
+- name: Migrate in-tree Cinder PV
+  hosts: kube_control_plane[0]
   tasks:
     - name: Include kubespray-default variables
       include_vars: ../roles/kubespray-defaults/defaults/main.yaml
diff --git a/extra_playbooks/upgrade-only-k8s.yml b/extra_playbooks/upgrade-only-k8s.yml
index 13ebcc4bd..4207f8d28 100644
--- a/extra_playbooks/upgrade-only-k8s.yml
+++ b/extra_playbooks/upgrade-only-k8s.yml
@@ -10,13 +10,15 @@
 ### In most cases, you probably want to use upgrade-cluster.yml playbook and
 ### not this one.
 
-- hosts: localhost
+- name: Setup ssh config to use the bastion
+  hosts: localhost
   gather_facts: False
   roles:
     - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
 
-- hosts: k8s_cluster:etcd:calico_rr
+- name: Bootstrap hosts for Ansible
+  hosts: k8s_cluster:etcd:calico_rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   gather_facts: false
   vars:
@@ -27,7 +29,8 @@
     - { role: kubespray-defaults}
     - { role: bootstrap-os, tags: bootstrap-os}
 
-- hosts: k8s_cluster:etcd:calico_rr
+- name: Preinstall
+  hosts: k8s_cluster:etcd:calico_rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
diff --git a/extra_playbooks/wait-for-cloud-init.yml b/extra_playbooks/wait-for-cloud-init.yml
index 7aa92d4c1..82c419456 100644
--- a/extra_playbooks/wait-for-cloud-init.yml
+++ b/extra_playbooks/wait-for-cloud-init.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: all
+- name: Wait for cloud-init to finish
+  hosts: all
   tasks:
     - name: Wait for cloud-init to finish
       command: cloud-init status --wait
diff --git a/playbooks/ansible_version.yml b/playbooks/ansible_version.yml
index 7e8a0df4c..236af1357 100644
--- a/playbooks/ansible_version.yml
+++ b/playbooks/ansible_version.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Check Ansible version
+  hosts: localhost
   gather_facts: false
   become: no
   vars:
diff --git a/playbooks/cluster.yml b/playbooks/cluster.yml
index 6e61459a1..991ae2312 100644
--- a/playbooks/cluster.yml
+++ b/playbooks/cluster.yml
@@ -5,7 +5,8 @@
 - name: Ensure compatibility with old groups
   import_playbook: legacy_groups.yml
 
-- hosts: bastion[0]
+- name: Install bastion ssh config
+  hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -16,7 +17,8 @@
   tags: always
   import_playbook: facts.yml
 
-- hosts: k8s_cluster:etcd
+- name: Prepare for etcd install
+  hosts: k8s_cluster:etcd
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -26,7 +28,8 @@
     - { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
     - { role: download, tags: download, when: "not skip_downloads" }
 
-- hosts: etcd:kube_control_plane
+- name: Install etcd
+  hosts: etcd:kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -39,7 +42,8 @@
         etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
       when: etcd_deployment_type != "kubeadm"
 
-- hosts: k8s_cluster
+- name: Install etcd certs on nodes if required
+  hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -55,7 +59,8 @@
         - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
         - kube_network_plugin != "calico" or calico_datastore == "etcd"
 
-- hosts: k8s_cluster
+- name: Install Kubernetes nodes
+  hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -63,7 +68,8 @@
     - { role: kubespray-defaults }
     - { role: kubernetes/node, tags: node }
 
-- hosts: kube_control_plane
+- name: Install the control plane
+  hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -73,7 +79,8 @@
     - { role: kubernetes/client, tags: client }
     - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
 
-- hosts: k8s_cluster
+- name: Invoke kubeadm and install a CNI
+  hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -84,7 +91,8 @@
     - { role: network_plugin, tags: network }
     - { role: kubernetes-apps/kubelet-csr-approver, tags: kubelet-csr-approver }
 
-- hosts: calico_rr
+- name: Install Calico Route Reflector
+  hosts: calico_rr
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -92,7 +100,8 @@
     - { role: kubespray-defaults }
     - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
 
-- hosts: kube_control_plane[0]
+- name: Patch Kubernetes for Windows
+  hosts: kube_control_plane[0]
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -100,7 +109,8 @@
     - { role: kubespray-defaults }
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
-- hosts: kube_control_plane
+- name: Install Kubernetes apps
+  hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
diff --git a/playbooks/facts.yml b/playbooks/facts.yml
index 4fff54f11..77823aca4 100644
--- a/playbooks/facts.yml
+++ b/playbooks/facts.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: k8s_cluster:etcd:calico_rr
+- name: Bootstrap hosts for Ansible
+  hosts: k8s_cluster:etcd:calico_rr
   strategy: linear
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   gather_facts: false
diff --git a/playbooks/legacy_groups.yml b/playbooks/legacy_groups.yml
index 0d017106f..643032ff0 100644
--- a/playbooks/legacy_groups.yml
+++ b/playbooks/legacy_groups.yml
@@ -6,7 +6,7 @@
   gather_facts: false
   tags: always
   tasks:
-    - name: add nodes to kube_control_plane group
+    - name: Add nodes to kube_control_plane group
       group_by:
         key: 'kube_control_plane'
 
@@ -15,7 +15,7 @@
   gather_facts: false
   tags: always
   tasks:
-    - name: add nodes to kube_node group
+    - name: Add nodes to kube_node group
       group_by:
         key: 'kube_node'
 
@@ -24,7 +24,7 @@
   gather_facts: false
   tags: always
   tasks:
-    - name: add nodes to k8s_cluster group
+    - name: Add nodes to k8s_cluster group
       group_by:
         key: 'k8s_cluster'
 
@@ -33,7 +33,7 @@
   gather_facts: false
   tags: always
   tasks:
-    - name: add nodes to calico_rr group
+    - name: Add nodes to calico_rr group
       group_by:
         key: 'calico_rr'
 
@@ -42,6 +42,6 @@
   gather_facts: false
   tags: always
   tasks:
-    - name: add nodes to no-floating group
+    - name: Add nodes to no-floating group
       group_by:
         key: 'no_floating'
diff --git a/playbooks/recover_control_plane.yml b/playbooks/recover_control_plane.yml
index 77ec5bec4..d2bb57427 100644
--- a/playbooks/recover_control_plane.yml
+++ b/playbooks/recover_control_plane.yml
@@ -5,29 +5,34 @@
 - name: Ensure compatibility with old groups
   import_playbook: legacy_groups.yml
 
-- hosts: bastion[0]
+- name: Install bastion ssh config
+  hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
 
-- hosts: etcd[0]
+- name: Recover etcd
+  hosts: etcd[0]
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
     - role: recover_control_plane/etcd
       when: etcd_deployment_type != "kubeadm"
 
-- hosts: kube_control_plane[0]
+- name: Recover control plane
+  hosts: kube_control_plane[0]
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
     - { role: recover_control_plane/control-plane }
 
-- import_playbook: cluster.yml
+- name: Apply whole cluster install
+  import_playbook: cluster.yml
 
-- hosts: kube_control_plane
+- name: Perform post-recovery tasks
+  hosts: kube_control_plane
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
diff --git a/playbooks/remove_node.yml b/playbooks/remove_node.yml
index be346a768..63df85938 100644
--- a/playbooks/remove_node.yml
+++ b/playbooks/remove_node.yml
@@ -5,14 +5,16 @@
 - name: Ensure compatibility with old groups
   import_playbook: legacy_groups.yml
 
-- hosts: bastion[0]
+- name: Install bastion ssh config
+  hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
 
-- hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
+- name: Confirm node removal
+  hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
   gather_facts: no
   tasks:
     - name: Confirm Execution
@@ -32,7 +34,8 @@
   import_playbook: facts.yml
   when: reset_nodes | default(True) | bool
 
-- hosts: "{{ node | default('kube_node') }}"
+- name: Reset node
+  hosts: "{{ node | default('kube_node') }}"
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -42,7 +45,8 @@
     - { role: reset, tags: reset, when: reset_nodes | default(True) | bool }
 
 # Currently cannot remove first master or etcd
-- hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
+- name: Post node removal
+  hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
   roles:
diff --git a/playbooks/reset.yml b/playbooks/reset.yml
index ded4c0ada..0b4312fbd 100644
--- a/playbooks/reset.yml
+++ b/playbooks/reset.yml
@@ -5,7 +5,8 @@
 - name: Ensure compatibility with old groups
   import_playbook: legacy_groups.yml
 
-- hosts: bastion[0]
+- name: Install bastion ssh config
+  hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -15,7 +16,8 @@
 - name: Gather facts
   import_playbook: facts.yml
 
-- hosts: etcd:k8s_cluster:calico_rr
+- name: Reset cluster
+  hosts: etcd:k8s_cluster:calico_rr
   gather_facts: False
   pre_tasks:
     - name: Reset Confirmation
diff --git a/playbooks/scale.yml b/playbooks/scale.yml
index 4d95543b8..007a65650 100644
--- a/playbooks/scale.yml
+++ b/playbooks/scale.yml
@@ -5,7 +5,8 @@
 - name: Ensure compatibility with old groups
   import_playbook: legacy_groups.yml
 
-- hosts: bastion[0]
+- name: Install bastion ssh config
+  hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -88,7 +89,7 @@
       environment: "{{ proxy_disable_env }}"
       register: kubeadm_upload_cert
       changed_when: false
-    - name: set fact 'kubeadm_certificate_key' for later use
+    - name: Set fact 'kubeadm_certificate_key' for later use
       set_fact:
         kubeadm_certificate_key: "{{ kubeadm_upload_cert.stdout_lines[-1] | trim }}"
       when: kubeadm_certificate_key is not defined
diff --git a/playbooks/upgrade_cluster.yml b/playbooks/upgrade_cluster.yml
index 272ec310f..d5469989c 100644
--- a/playbooks/upgrade_cluster.yml
+++ b/playbooks/upgrade_cluster.yml
@@ -5,7 +5,8 @@
 - name: Ensure compatibility with old groups
   import_playbook: legacy_groups.yml
 
-- hosts: bastion[0]
+- name: Install bastion ssh config
+  hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -46,7 +47,8 @@
     - { role: kubespray-defaults }
     - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
 
-- hosts: etcd:kube_control_plane
+- name: Install etcd
+  hosts: etcd:kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -59,7 +61,8 @@
         etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
       when: etcd_deployment_type != "kubeadm"
 
-- hosts: k8s_cluster
+- name: Install etcd certs on nodes if required
+  hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -126,7 +129,8 @@
     - { role: kubernetes/node-label, tags: node-label }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
 
-- hosts: kube_control_plane[0]
+- name: Patch Kubernetes for Windows
+  hosts: kube_control_plane[0]
   gather_facts: False
   any_errors_fatal: true
   environment: "{{ proxy_disable_env }}"
@@ -134,7 +138,8 @@
     - { role: kubespray-defaults }
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
-- hosts: calico_rr
+- name: Install Calico Route Reflector
+  hosts: calico_rr
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -142,7 +147,8 @@
     - { role: kubespray-defaults }
     - { role: network_plugin/calico/rr, tags: network }
 
-- hosts: kube_control_plane
+- name: Install Kubernetes apps
+  hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
diff --git a/roles/bastion-ssh-config/tasks/main.yml b/roles/bastion-ssh-config/tasks/main.yml
index a18291b3b..920763eb5 100644
--- a/roles/bastion-ssh-config/tasks/main.yml
+++ b/roles/bastion-ssh-config/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: set bastion host IP and port
+- name: Set bastion host IP and port
   set_fact:
     bastion_ip: "{{ hostvars[groups['bastion'][0]]['ansible_host'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_host']) }}"
     bastion_port: "{{ hostvars[groups['bastion'][0]]['ansible_port'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_port']) | d(22) }}"
@@ -12,7 +12,7 @@
   set_fact:
     real_user: "{{ ansible_user }}"
 
-- name: create ssh bastion conf
+- name: Create ssh bastion conf
   become: false
   delegate_to: localhost
   connection: local
diff --git a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml
index 42321fd37..73c9e060f 100644
--- a/roles/bootstrap-os/tasks/main.yml
+++ b/roles/bootstrap-os/tasks/main.yml
@@ -6,37 +6,46 @@
   # This command should always run, even in check mode
   check_mode: false
 
-- include_tasks: bootstrap-centos.yml
+- name: Bootstrap CentOS
+  include_tasks: bootstrap-centos.yml
   when: '''ID="centos"'' in os_release.stdout_lines or ''ID="ol"'' in os_release.stdout_lines or ''ID="almalinux"'' in os_release.stdout_lines or ''ID="rocky"'' in os_release.stdout_lines or ''ID="kylin"'' in os_release.stdout_lines  or ''ID="uos"'' in os_release.stdout_lines or ''ID="openEuler"'' in os_release.stdout_lines'
 
-- include_tasks: bootstrap-amazon.yml
+- name: Bootstrap Amazon
+  include_tasks: bootstrap-amazon.yml
   when: '''ID="amzn"'' in os_release.stdout_lines'
 
-- include_tasks: bootstrap-redhat.yml
+- name: Bootstrap RedHat
+  include_tasks: bootstrap-redhat.yml
   when: '''ID="rhel"'' in os_release.stdout_lines'
 
-- include_tasks: bootstrap-clearlinux.yml
+- name: Bootstrap Clear Linux
+  include_tasks: bootstrap-clearlinux.yml
   when: '''ID=clear-linux-os'' in os_release.stdout_lines'
 
 # Fedora CoreOS
-- include_tasks: bootstrap-fedora-coreos.yml
+- name: Bootstrap Fedora CoreOS
+  include_tasks: bootstrap-fedora-coreos.yml
   when:
     - '''ID=fedora'' in os_release.stdout_lines'
     - '''VARIANT_ID=coreos'' in os_release.stdout_lines'
 
-- include_tasks: bootstrap-flatcar.yml
+- name: Bootstrap Flatcar
+  include_tasks: bootstrap-flatcar.yml
   when: '''ID=flatcar'' in os_release.stdout_lines'
 
-- include_tasks: bootstrap-debian.yml
+- name: Bootstrap Debian
+  include_tasks: bootstrap-debian.yml
   when: '''ID=debian'' in os_release.stdout_lines or ''ID=ubuntu'' in os_release.stdout_lines'
 
 # Fedora "classic"
-- include_tasks: bootstrap-fedora.yml
+- name: Bootstrap Fedora
+  include_tasks: bootstrap-fedora.yml
   when:
     - '''ID=fedora'' in os_release.stdout_lines'
     - '''VARIANT_ID=coreos'' not in os_release.stdout_lines'
 
-- include_tasks: bootstrap-opensuse.yml
+- name: Bootstrap OpenSUSE
+  include_tasks: bootstrap-opensuse.yml
   when: '''ID="opensuse-leap"'' in os_release.stdout_lines or ''ID="opensuse-tumbleweed"'' in os_release.stdout_lines'
 
 - name: Create remote_tmp for it is used by another module
diff --git a/roles/container-engine/containerd-common/tasks/main.yml b/roles/container-engine/containerd-common/tasks/main.yml
index fcca4fb64..d0cf1f139 100644
--- a/roles/container-engine/containerd-common/tasks/main.yml
+++ b/roles/container-engine/containerd-common/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: containerd-common | check if fedora coreos
+- name: Containerd-common | check if fedora coreos
   stat:
     path: /run/ostree-booted
     get_attributes: no
@@ -7,11 +7,11 @@
     get_mime: no
   register: ostree
 
-- name: containerd-common | set is_ostree
+- name: Containerd-common | set is_ostree
   set_fact:
     is_ostree: "{{ ostree.stat.exists }}"
 
-- name: containerd-common | gather os specific variables
+- name: Containerd-common | gather os specific variables
   include_vars: "{{ item }}"
   with_first_found:
     - files:
diff --git a/roles/container-engine/containerd/handlers/main.yml b/roles/container-engine/containerd/handlers/main.yml
index d2f12658f..3c132bdf0 100644
--- a/roles/container-engine/containerd/handlers/main.yml
+++ b/roles/container-engine/containerd/handlers/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: restart containerd
+- name: Restart containerd
   command: /bin/true
   notify:
     - Containerd | restart containerd
diff --git a/roles/container-engine/containerd/molecule/default/prepare.yml b/roles/container-engine/containerd/molecule/default/prepare.yml
index 100673cb4..ddc9c0453 100644
--- a/roles/container-engine/containerd/molecule/default/prepare.yml
+++ b/roles/container-engine/containerd/molecule/default/prepare.yml
@@ -12,7 +12,8 @@
     - role: adduser
       user: "{{ addusers.kube }}"
   tasks:
-    - include_tasks: "../../../../download/tasks/download_file.yml"
+    - name: Download CNI
+      include_tasks: "../../../../download/tasks/download_file.yml"
       vars:
         download: "{{ download_defaults | combine(downloads.cni) }}"
 
diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml
index e3ee58643..a8e1e769c 100644
--- a/roles/container-engine/containerd/tasks/main.yml
+++ b/roles/container-engine/containerd/tasks/main.yml
@@ -5,33 +5,33 @@
   when:
     - not (allow_unsupported_distribution_setup | default(false)) and (ansible_distribution not in containerd_supported_distributions)
 
-- name: containerd | Remove any package manager controlled containerd package
+- name: Containerd | Remove any package manager controlled containerd package
   package:
     name: "{{ containerd_package }}"
     state: absent
   when:
     - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
 
-- name: containerd | Remove containerd repository
+- name: Containerd | Remove containerd repository
   file:
     path: "{{ yum_repo_dir }}/containerd.repo"
     state: absent
   when:
     - ansible_os_family in ['RedHat']
 
-- name: containerd | Remove containerd repository
+- name: Containerd | Remove containerd repository
   apt_repository:
     repo: "{{ item }}"
     state: absent
   with_items: "{{ containerd_repo_info.repos }}"
   when: ansible_pkg_mgr == 'apt'
 
-- name: containerd | Download containerd
+- name: Containerd | Download containerd
   include_tasks: "../../../download/tasks/download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.containerd) }}"
 
-- name: containerd | Unpack containerd archive
+- name: Containerd | Unpack containerd archive
   unarchive:
     src: "{{ downloads.containerd.dest }}"
     dest: "{{ containerd_bin_dir }}"
@@ -39,9 +39,9 @@
     remote_src: yes
     extra_opts:
       - --strip-components=1
-  notify: restart containerd
+  notify: Restart containerd
 
-- name: containerd | Remove orphaned binary
+- name: Containerd | Remove orphaned binary
   file:
     path: "/usr/bin/{{ item }}"
     state: absent
@@ -56,14 +56,14 @@
     - containerd-shim-runc-v2
     - ctr
 
-- name: containerd | Generate systemd service for containerd
+- name: Containerd | Generate systemd service for containerd
   template:
     src: containerd.service.j2
     dest: /etc/systemd/system/containerd.service
     mode: 0644
-  notify: restart containerd
+  notify: Restart containerd
 
-- name: containerd | Ensure containerd directories exist
+- name: Containerd | Ensure containerd directories exist
   file:
     dest: "{{ item }}"
     state: directory
@@ -76,50 +76,51 @@
     - "{{ containerd_storage_dir }}"
     - "{{ containerd_state_dir }}"
 
-- name: containerd | Write containerd proxy drop-in
+- name: Containerd | Write containerd proxy drop-in
   template:
     src: http-proxy.conf.j2
     dest: "{{ containerd_systemd_dir }}/http-proxy.conf"
     mode: 0644
-  notify: restart containerd
+  notify: Restart containerd
   when: http_proxy is defined or https_proxy is defined
 
-- name: containerd | Generate default base_runtime_spec
+- name: Containerd | Generate default base_runtime_spec
   register: ctr_oci_spec
   command: "{{ containerd_bin_dir }}/ctr oci spec"
   check_mode: false
   changed_when: false
 
-- name: containerd | Store generated default base_runtime_spec
+- name: Containerd | Store generated default base_runtime_spec
   set_fact:
     containerd_default_base_runtime_spec: "{{ ctr_oci_spec.stdout | from_json }}"
 
-- name: containerd | Write base_runtime_specs
+- name: Containerd | Write base_runtime_specs
   copy:
     content: "{{ item.value }}"
     dest: "{{ containerd_cfg_dir }}/{{ item.key }}"
     owner: "root"
     mode: 0644
   with_dict: "{{ containerd_base_runtime_specs | default({}) }}"
-  notify: restart containerd
+  notify: Restart containerd
 
-- name: containerd | Copy containerd config file
+- name: Containerd | Copy containerd config file
   template:
     src: config.toml.j2
     dest: "{{ containerd_cfg_dir }}/config.toml"
     owner: "root"
     mode: 0640
-  notify: restart containerd
+  notify: Restart containerd
 
-- block:
-    - name: containerd | Create registry directories
+- name: Containerd | Configure containerd registries
+  block:
+    - name: Containerd | Create registry directories
       file:
         path: "{{ containerd_cfg_dir }}/certs.d/{{ item.key }}"
         state: directory
         mode: 0755
         recurse: true
       with_dict: "{{ containerd_insecure_registries }}"
-    - name: containerd | Write hosts.toml file
+    - name: Containerd | Write hosts.toml file
       blockinfile:
         path: "{{ containerd_cfg_dir }}/certs.d/{{ item.key }}/hosts.toml"
         mode: 0640
@@ -134,10 +135,10 @@
 
 # you can sometimes end up in a state where everything is installed
 # but containerd was not started / enabled
-- name: containerd | Flush handlers
+- name: Containerd | Flush handlers
   meta: flush_handlers
 
-- name: containerd | Ensure containerd is started and enabled
+- name: Containerd | Ensure containerd is started and enabled
   systemd:
     name: containerd
     daemon_reload: yes
diff --git a/roles/container-engine/containerd/tasks/reset.yml b/roles/container-engine/containerd/tasks/reset.yml
index 1788e4ea9..517e56da6 100644
--- a/roles/container-engine/containerd/tasks/reset.yml
+++ b/roles/container-engine/containerd/tasks/reset.yml
@@ -1,5 +1,5 @@
 ---
-- name: containerd | Remove containerd repository for RedHat os family
+- name: Containerd | Remove containerd repository for RedHat os family
   file:
     path: "{{ yum_repo_dir }}/containerd.repo"
     state: absent
@@ -8,7 +8,7 @@
   tags:
     - reset_containerd
 
-- name: containerd | Remove containerd repository for Debian os family
+- name: Containerd | Remove containerd repository for Debian os family
   apt_repository:
     repo: "{{ item }}"
     state: absent
@@ -17,7 +17,7 @@
   tags:
     - reset_containerd
 
-- name: containerd | Stop containerd service
+- name: Containerd | Stop containerd service
   service:
     name: containerd
     daemon_reload: true
@@ -26,7 +26,7 @@
   tags:
     - reset_containerd
 
-- name: containerd | Remove configuration files
+- name: Containerd | Remove configuration files
   file:
     path: "{{ item }}"
     state: absent
diff --git a/roles/container-engine/cri-dockerd/handlers/main.yml b/roles/container-engine/cri-dockerd/handlers/main.yml
index 9d9d8c643..3990d3397 100644
--- a/roles/container-engine/cri-dockerd/handlers/main.yml
+++ b/roles/container-engine/cri-dockerd/handlers/main.yml
@@ -1,35 +1,35 @@
 ---
-- name: restart and enable cri-dockerd
+- name: Restart and enable cri-dockerd
   command: /bin/true
   notify:
-    - cri-dockerd | reload systemd
-    - cri-dockerd | restart docker.service
-    - cri-dockerd | reload cri-dockerd.socket
-    - cri-dockerd | reload cri-dockerd.service
-    - cri-dockerd | enable cri-dockerd service
+    - Cri-dockerd | reload systemd
+    - Cri-dockerd | restart docker.service
+    - Cri-dockerd | reload cri-dockerd.socket
+    - Cri-dockerd | reload cri-dockerd.service
+    - Cri-dockerd | enable cri-dockerd service
 
-- name: cri-dockerd | reload systemd
+- name: Cri-dockerd | reload systemd
   systemd:
     name: cri-dockerd
     daemon_reload: true
     masked: no
 
-- name: cri-dockerd | restart docker.service
+- name: Cri-dockerd | restart docker.service
   service:
     name: docker.service
     state: restarted
 
-- name: cri-dockerd | reload cri-dockerd.socket
+- name: Cri-dockerd | reload cri-dockerd.socket
   service:
     name: cri-dockerd.socket
     state: restarted
 
-- name: cri-dockerd | reload cri-dockerd.service
+- name: Cri-dockerd | reload cri-dockerd.service
   service:
     name: cri-dockerd.service
     state: restarted
 
-- name: cri-dockerd | enable cri-dockerd service
+- name: Cri-dockerd | enable cri-dockerd service
   service:
     name: cri-dockerd.service
     enabled: yes
diff --git a/roles/container-engine/cri-dockerd/molecule/default/prepare.yml b/roles/container-engine/cri-dockerd/molecule/default/prepare.yml
index c54feaca2..83449f842 100644
--- a/roles/container-engine/cri-dockerd/molecule/default/prepare.yml
+++ b/roles/container-engine/cri-dockerd/molecule/default/prepare.yml
@@ -8,7 +8,8 @@
     - role: adduser
       user: "{{ addusers.kube }}"
   tasks:
-    - include_tasks: "../../../../download/tasks/download_file.yml"
+    - name: Download CNI
+      include_tasks: "../../../../download/tasks/download_file.yml"
       vars:
         download: "{{ download_defaults | combine(downloads.cni) }}"
 
diff --git a/roles/container-engine/cri-dockerd/tasks/main.yml b/roles/container-engine/cri-dockerd/tasks/main.yml
index 9ce3ec6ef..f8965fd04 100644
--- a/roles/container-engine/cri-dockerd/tasks/main.yml
+++ b/roles/container-engine/cri-dockerd/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: runc | Download cri-dockerd binary
+- name: Cri-dockerd | Download cri-dockerd binary
   include_tasks: "../../../download/tasks/download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.cri_dockerd) }}"
@@ -11,7 +11,7 @@
     mode: 0755
     remote_src: true
   notify:
-    - restart and enable cri-dockerd
+    - Restart and enable cri-dockerd
 
 - name: Generate cri-dockerd systemd unit files
   template:
@@ -22,7 +22,7 @@
     - cri-dockerd.service
     - cri-dockerd.socket
   notify:
-    - restart and enable cri-dockerd
+    - Restart and enable cri-dockerd
 
 - name: Flush handlers
   meta: flush_handlers
diff --git a/roles/container-engine/cri-o/handlers/main.yml b/roles/container-engine/cri-o/handlers/main.yml
index 8bc936b45..763f4b558 100644
--- a/roles/container-engine/cri-o/handlers/main.yml
+++ b/roles/container-engine/cri-o/handlers/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: restart crio
+- name: Restart crio
   command: /bin/true
   notify:
     - CRI-O | reload systemd
diff --git a/roles/container-engine/cri-o/molecule/default/prepare.yml b/roles/container-engine/cri-o/molecule/default/prepare.yml
index ec47a1e5b..103b0d33e 100644
--- a/roles/container-engine/cri-o/molecule/default/prepare.yml
+++ b/roles/container-engine/cri-o/molecule/default/prepare.yml
@@ -12,7 +12,8 @@
     - role: adduser
       user: "{{ addusers.kube }}"
   tasks:
-    - include_tasks: "../../../../download/tasks/download_file.yml"
+    - name: Download CNI
+      include_tasks: "../../../../download/tasks/download_file.yml"
       vars:
         download: "{{ download_defaults | combine(downloads.cni) }}"
 
diff --git a/roles/container-engine/cri-o/tasks/cleanup.yaml b/roles/container-engine/cri-o/tasks/cleanup.yaml
index 2c3872229..2b8251c4e 100644
--- a/roles/container-engine/cri-o/tasks/cleanup.yaml
+++ b/roles/container-engine/cri-o/tasks/cleanup.yaml
@@ -109,7 +109,7 @@
     - 1.23
     - 1.24
 
-- name: cri-o | remove installed packages
+- name: Cri-o | remove installed packages
   package:
     name: "{{ item }}"
     state: absent
diff --git a/roles/container-engine/cri-o/tasks/main.yaml b/roles/container-engine/cri-o/tasks/main.yaml
index 4a667ac9a..6344f0393 100644
--- a/roles/container-engine/cri-o/tasks/main.yaml
+++ b/roles/container-engine/cri-o/tasks/main.yaml
@@ -1,5 +1,5 @@
 ---
-- name: cri-o | check if fedora coreos
+- name: Cri-o | check if fedora coreos
   stat:
     path: /run/ostree-booted
     get_attributes: no
@@ -7,48 +7,48 @@
     get_mime: no
   register: ostree
 
-- name: cri-o | set is_ostree
+- name: Cri-o | set is_ostree
   set_fact:
     is_ostree: "{{ ostree.stat.exists }}"
 
-- name: cri-o | get ostree version
+- name: Cri-o | get ostree version
   shell: "set -o pipefail && rpm-ostree --version | awk -F\\' '/Version/{print $2}'"
   args:
     executable: /bin/bash
   register: ostree_version
   when: is_ostree
 
-- name: cri-o | Download cri-o
+- name: Cri-o | Download cri-o
   include_tasks: "../../../download/tasks/download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.crio) }}"
 
-- name: cri-o | special handling for amazon linux
+- name: Cri-o | special handling for amazon linux
   import_tasks: "setup-amazon.yaml"
   when: ansible_distribution in ["Amazon"]
 
-- name: cri-o | clean up reglacy repos
+- name: Cri-o | clean up legacy repos
   import_tasks: "cleanup.yaml"
 
-- name: cri-o | build a list of crio runtimes with Katacontainers runtimes
+- name: Cri-o | build a list of crio runtimes with Katacontainers runtimes
   set_fact:
     crio_runtimes: "{{ crio_runtimes + kata_runtimes }}"
   when:
     - kata_containers_enabled
 
-- name: cri-o | build a list of crio runtimes with crun runtime
+- name: Cri-o | build a list of crio runtimes with crun runtime
   set_fact:
     crio_runtimes: "{{ crio_runtimes + [crun_runtime] }}"
   when:
     - crun_enabled
 
-- name: cri-o | build a list of crio runtimes with youki runtime
+- name: Cri-o | build a list of crio runtimes with youki runtime
   set_fact:
     crio_runtimes: "{{ crio_runtimes + [youki_runtime] }}"
   when:
     - youki_enabled
 
-- name: cri-o | make sure needed folders exist in the system
+- name: Cri-o | make sure needed folders exist in the system
   with_items:
     - /etc/crio
     - /etc/containers
@@ -58,21 +58,21 @@
     state: directory
     mode: 0755
 
-- name: cri-o | install cri-o config
+- name: Cri-o | install cri-o config
   template:
     src: crio.conf.j2
     dest: /etc/crio/crio.conf
     mode: 0644
   register: config_install
 
-- name: cri-o | install config.json
+- name: Cri-o | install config.json
   template:
     src: config.json.j2
     dest: /etc/crio/config.json
     mode: 0644
   register: reg_auth_install
 
-- name: cri-o | copy binaries
+- name: Cri-o | copy binaries
   copy:
     src: "{{ local_release_dir }}/cri-o/bin/{{ item }}"
     dest: "{{ bin_dir }}/{{ item }}"
@@ -80,48 +80,48 @@
     remote_src: true
   with_items:
     - "{{ crio_bin_files }}"
-  notify: restart crio
+  notify: Restart crio
 
-- name: cri-o | copy service file
+- name: Cri-o | copy service file
   copy:
     src: "{{ local_release_dir }}/cri-o/contrib/crio.service"
     dest: /etc/systemd/system/crio.service
     mode: 0755
     remote_src: true
-  notify: restart crio
+  notify: Restart crio
 
-- name: cri-o | update the bin dir for crio.service file
+- name: Cri-o | update the bin dir for crio.service file
   replace:
     dest: /etc/systemd/system/crio.service
     regexp: "/usr/local/bin/crio"
     replace: "{{ bin_dir }}/crio"
-  notify: restart crio
+  notify: Restart crio
 
-- name: cri-o | copy default policy
+- name: Cri-o | copy default policy
   copy:
     src: "{{ local_release_dir }}/cri-o/contrib/policy.json"
     dest: /etc/containers/policy.json
     mode: 0755
     remote_src: true
-  notify: restart crio
+  notify: Restart crio
 
-- name: cri-o | copy mounts.conf
+- name: Cri-o | copy mounts.conf
   copy:
     src: mounts.conf
     dest: /etc/containers/mounts.conf
     mode: 0644
   when:
     - ansible_os_family == 'RedHat'
-  notify: restart crio
+  notify: Restart crio
 
-- name: cri-o | create directory for oci hooks
+- name: Cri-o | create directory for oci hooks
   file:
     path: /etc/containers/oci/hooks.d
     state: directory
     owner: root
     mode: 0755
 
-- name: cri-o | set overlay driver
+- name: Cri-o | set overlay driver
   community.general.ini_file:
     dest: /etc/containers/storage.conf
     section: storage
@@ -135,7 +135,7 @@
       value: '"/var/lib/containers/storage"'
 
 # metacopy=on is available since 4.19 and was backported to RHEL 4.18 kernel
-- name: cri-o | set metacopy mount options correctly
+- name: Cri-o | set metacopy mount options correctly
   community.general.ini_file:
     dest: /etc/containers/storage.conf
     section: storage.options.overlay
@@ -143,37 +143,37 @@
     value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}'
     mode: 0644
 
-- name: cri-o | create directory registries configs
+- name: Cri-o | create directory registries configs
   file:
     path: /etc/containers/registries.conf.d
     state: directory
     owner: root
     mode: 0755
 
-- name: cri-o | write registries configs
+- name: Cri-o | write registries configs
   template:
     src: registry.conf.j2
     dest: "/etc/containers/registries.conf.d/10-{{ item.prefix | default(item.location) | regex_replace(':', '_') }}.conf"
     mode: 0644
   loop: "{{ crio_registries }}"
-  notify: restart crio
+  notify: Restart crio
 
-- name: cri-o | configure unqualified registry settings
+- name: Cri-o | configure unqualified registry settings
   template:
     src: unqualified.conf.j2
     dest: "/etc/containers/registries.conf.d/01-unqualified.conf"
     mode: 0644
-  notify: restart crio
+  notify: Restart crio
 
-- name: cri-o | write cri-o proxy drop-in
+- name: Cri-o | write cri-o proxy drop-in
   template:
     src: http-proxy.conf.j2
     dest: /etc/systemd/system/crio.service.d/http-proxy.conf
     mode: 0644
-  notify: restart crio
+  notify: Restart crio
   when: http_proxy is defined or https_proxy is defined
 
-- name: cri-o | configure the uid/gid space for user namespaces
+- name: Cri-o | configure the uid/gid space for user namespaces
   lineinfile:
     path: '{{ item.path }}'
     line: '{{ item.entry }}'
@@ -187,7 +187,7 @@
   loop_control:
     label: '{{ item.path }}'
 
-- name: cri-o | ensure crio service is started and enabled
+- name: Cri-o | ensure crio service is started and enabled
   service:
     name: crio
     daemon_reload: true
@@ -195,7 +195,7 @@
     state: started
   register: service_start
 
-- name: cri-o | trigger service restart only when needed
+- name: Cri-o | trigger service restart only when needed
   service:
     name: crio
     state: restarted
@@ -203,7 +203,7 @@
     - config_install.changed or reg_auth_install.changed
     - not service_start.changed
 
-- name: cri-o | verify that crio is running
+- name: Cri-o | verify that crio is running
   command: "{{ bin_dir }}/crio-status info"
   register: get_crio_info
   until: get_crio_info is succeeded
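
Ansible matches `notify` strings against handler names verbatim and case-sensitively, which is why every `notify: restart crio` above must change in lockstep with the capitalized handler (the ansible-lint name[casing] rule is what forces the uppercase first letter). A condensed sketch of the pairing this hunk keeps in sync; the real handler chains through a systemd reload, abridged here:

  # handlers/main.yml
  - name: Restart crio
    service:
      name: crio
      state: restarted

  # tasks/main.yaml
  - name: Cri-o | install cri-o config
    template:
      src: crio.conf.j2
      dest: /etc/crio/crio.conf
      mode: 0644
    notify: Restart crio  # must match the handler name exactly, including case
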
diff --git a/roles/container-engine/crictl/tasks/crictl.yml b/roles/container-engine/crictl/tasks/crictl.yml
index 36e09e4a8..cffa05056 100644
--- a/roles/container-engine/crictl/tasks/crictl.yml
+++ b/roles/container-engine/crictl/tasks/crictl.yml
@@ -1,5 +1,5 @@
 ---
-- name: crictl | Download crictl
+- name: Crictl | Download crictl
   include_tasks: "../../../download/tasks/download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.crictl) }}"
diff --git a/roles/container-engine/crictl/tasks/main.yml b/roles/container-engine/crictl/tasks/main.yml
index a07112696..9337016c1 100644
--- a/roles/container-engine/crictl/tasks/main.yml
+++ b/roles/container-engine/crictl/tasks/main.yml
@@ -1,3 +1,3 @@
 ---
-- name: install crictl
+- name: Install crictl
   include_tasks: crictl.yml
diff --git a/roles/container-engine/crun/tasks/main.yml b/roles/container-engine/crun/tasks/main.yml
index 1de9ce56d..c21bb3ffe 100644
--- a/roles/container-engine/crun/tasks/main.yml
+++ b/roles/container-engine/crun/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: crun | Download crun binary
+- name: Crun | Download crun binary
   include_tasks: "../../../download/tasks/download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.crun) }}"
diff --git a/roles/container-engine/docker-storage/tasks/main.yml b/roles/container-engine/docker-storage/tasks/main.yml
index 462938191..ec129753d 100644
--- a/roles/container-engine/docker-storage/tasks/main.yml
+++ b/roles/container-engine/docker-storage/tasks/main.yml
@@ -1,18 +1,18 @@
 ---
 
-- name: docker-storage-setup | install git and make
+- name: Docker-storage-setup | install git and make
   with_items: [git, make]
   package:
     pkg: "{{ item }}"
     state: present
 
-- name: docker-storage-setup | docker-storage-setup sysconfig template
+- name: Docker-storage-setup | docker-storage-setup sysconfig template
   template:
     src: docker-storage-setup.j2
     dest: /etc/sysconfig/docker-storage-setup
     mode: 0644
 
-- name: docker-storage-override-directory | docker service storage-setup override dir
+- name: Docker-storage-override-directory | docker service storage-setup override dir
   file:
     dest: /etc/systemd/system/docker.service.d
     mode: 0755
@@ -20,7 +20,7 @@
     group: root
     state: directory
 
-- name: docker-storage-override | docker service storage-setup override file
+- name: Docker-storage-override | docker service storage-setup override file
   copy:
     dest: /etc/systemd/system/docker.service.d/override.conf
     content: |-
@@ -33,12 +33,12 @@
     mode: 0644
 
 # https://docs.docker.com/engine/installation/linux/docker-ce/centos/#install-using-the-repository
-- name: docker-storage-setup | install lvm2
+- name: Docker-storage-setup | install lvm2
   package:
     name: lvm2
     state: present
 
-- name: docker-storage-setup | install and run container-storage-setup
+- name: Docker-storage-setup | install and run container-storage-setup
   become: yes
   script: |
     install_container_storage_setup.sh \
diff --git a/roles/container-engine/docker/handlers/main.yml b/roles/container-engine/docker/handlers/main.yml
index 8c26de273..14a7b3973 100644
--- a/roles/container-engine/docker/handlers/main.yml
+++ b/roles/container-engine/docker/handlers/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: restart docker
+- name: Restart docker
   command: /bin/true
   notify:
     - Docker | reload systemd
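
`Restart docker` is deliberately a no-op: `command: /bin/true` changes nothing itself and exists only so that notifying one name queues an ordered chain of follow-up handlers. A condensed sketch of the pattern; the real chain continues past the systemd reload:

  - name: Restart docker
    command: /bin/true        # no-op trigger; the work happens in the chained handlers
    notify:
      - Docker | reload systemd

  - name: Docker | reload systemd
    systemd:
      daemon_reload: true     # re-read edited unit files before any restart
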
diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml
index 9413ba914..cf81ce2b1 100644
--- a/roles/container-engine/docker/tasks/main.yml
+++ b/roles/container-engine/docker/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: check if fedora coreos
+- name: Check if fedora coreos
   stat:
     path: /run/ostree-booted
     get_attributes: no
@@ -7,18 +7,18 @@
     get_mime: no
   register: ostree
 
-- name: set is_ostree
+- name: Set is_ostree
   set_fact:
     is_ostree: "{{ ostree.stat.exists }}"
 
-- name: set docker_version for openEuler
+- name: Set docker_version for openEuler
   set_fact:
     docker_version: '19.03'
   when: ansible_distribution == "openEuler"
   tags:
     - facts
 
-- name: gather os specific variables
+- name: Gather os specific variables
   include_vars: "{{ item }}"
   with_first_found:
     - files:
@@ -44,14 +44,16 @@
     msg: "SUSE distributions always install Docker from the distro repos"
   when: ansible_pkg_mgr == 'zypper'
 
-- include_tasks: set_facts_dns.yml
+- name: Gather DNS facts
+  include_tasks: set_facts_dns.yml
   when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
   tags:
     - facts
 
-- import_tasks: pre-upgrade.yml
+- name: Pre-upgrade docker
+  import_tasks: pre-upgrade.yml
 
-- name: ensure docker-ce repository public key is installed
+- name: Ensure docker-ce repository public key is installed
   apt_key:
     id: "{{ item }}"
     url: "{{ docker_repo_key_info.url }}"
@@ -64,7 +66,7 @@
   environment: "{{ proxy_env }}"
   when: ansible_pkg_mgr == 'apt'
 
-- name: ensure docker-ce repository is enabled
+- name: Ensure docker-ce repository is enabled
   apt_repository:
     repo: "{{ item }}"
     state: present
@@ -99,7 +101,7 @@
     - docker-ce
     - docker-ce-cli
 
-- name: ensure docker packages are installed
+- name: Ensure docker packages are installed
   package:
     name: "{{ docker_package_info.pkgs }}"
     state: "{{ docker_package_info.state | default('present') }}"
@@ -117,7 +119,7 @@
   until: docker_task_result is succeeded
   retries: 4
   delay: "{{ retry_stagger | d(3) }}"
-  notify: restart docker
+  notify: Restart docker
   when:
     - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
     - not is_ostree
@@ -135,9 +137,9 @@
     - docker-ce
     - docker-ce-cli
 
-- name: ensure docker started, remove our config if docker start failed and try again
+- name: Ensure docker started, remove our config if docker start failed and try again
   block:
-    - name: ensure service is started if docker packages are already present
+    - name: Ensure service is started if docker packages are already present
       service:
         name: docker
         state: started
@@ -145,7 +147,7 @@
   rescue:
     - debug:  # noqa name[missing]
         msg: "Docker start failed. Try to remove our config"
-    - name: remove kubespray generated config
+    - name: Remove kubespray generated config
       file:
         path: "{{ item }}"
         state: absent
@@ -154,13 +156,14 @@
         - /etc/systemd/system/docker.service.d/docker-options.conf
         - /etc/systemd/system/docker.service.d/docker-dns.conf
         - /etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf
-      notify: restart docker
+      notify: Restart docker
 
-- name: flush handlers so we can wait for docker to come up
+- name: Flush handlers so we can wait for docker to come up
   meta: flush_handlers
 
 # Install each plugin using a looped include to make error handling in the included task simpler.
-- include_tasks: docker_plugin.yml
+- name: Install docker plugin
+  include_tasks: docker_plugin.yml
   loop: "{{ docker_plugins }}"
   loop_control:
     loop_var: docker_plugin
@@ -168,7 +171,7 @@
 - name: Set docker systemd config
   import_tasks: systemd.yml
 
-- name: ensure docker service is started and enabled
+- name: Ensure docker service is started and enabled
   service:
     name: "{{ item }}"
     enabled: yes
diff --git a/roles/container-engine/docker/tasks/reset.yml b/roles/container-engine/docker/tasks/reset.yml
index 51b79e5a7..4bca908e6 100644
--- a/roles/container-engine/docker/tasks/reset.yml
+++ b/roles/container-engine/docker/tasks/reset.yml
@@ -21,7 +21,7 @@
   ignore_errors: true  # noqa ignore-errors
   when: docker_packages_list | length>0
 
-- name: reset | remove all containers
+- name: Reset | remove all containers
   shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"
   args:
     executable: /bin/bash
diff --git a/roles/container-engine/docker/tasks/set_facts_dns.yml b/roles/container-engine/docker/tasks/set_facts_dns.yml
index 9d563a259..d7c10392e 100644
--- a/roles/container-engine/docker/tasks/set_facts_dns.yml
+++ b/roles/container-engine/docker/tasks/set_facts_dns.yml
@@ -1,23 +1,23 @@
 ---
 
-- name: set dns server for docker
+- name: Set dns server for docker
   set_fact:
     docker_dns_servers: "{{ dns_servers }}"
 
-- name: show docker_dns_servers
+- name: Show docker_dns_servers
   debug:
     msg: "{{ docker_dns_servers }}"
 
-- name: add upstream dns servers
+- name: Add upstream dns servers
   set_fact:
     docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers | default([]) }}"
   when: dns_mode in ['coredns', 'coredns_dual']
 
-- name: add global searchdomains
+- name: Add global searchdomains
   set_fact:
     docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains | default([]) }}"
 
-- name: check system nameservers
+- name: Check system nameservers
   shell: set -o pipefail && grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
   args:
     executable: /bin/bash
@@ -25,7 +25,7 @@
   register: system_nameservers
   check_mode: no
 
-- name: check system search domains
+- name: Check system search domains
   # noqa risky-shell-pipe - if resolf.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
   # Therefore -o pipefail is not applicable in this specific instance
   shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
@@ -35,32 +35,32 @@
   register: system_search_domains
   check_mode: no
 
-- name: add system nameservers to docker options
+- name: Add system nameservers to docker options
   set_fact:
     docker_dns_servers: "{{ docker_dns_servers | union(system_nameservers.stdout_lines) | unique }}"
   when: system_nameservers.stdout
 
-- name: add system search domains to docker options
+- name: Add system search domains to docker options
   set_fact:
     docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split() | default([])) | unique }}"
   when: system_search_domains.stdout
 
-- name: check number of nameservers
+- name: Check number of nameservers
   fail:
     msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=false in docker.yml and we will only use the first 3."
   when: docker_dns_servers | length > 3 and docker_dns_servers_strict | bool
 
-- name: rtrim number of nameservers to 3
+- name: Rtrim number of nameservers to 3
   set_fact:
     docker_dns_servers: "{{ docker_dns_servers[0:3] }}"
   when: docker_dns_servers | length > 3 and not docker_dns_servers_strict | bool
 
-- name: check number of search domains
+- name: Check number of search domains
   fail:
     msg: "Too many search domains"
   when: docker_dns_search_domains | length > 6
 
-- name: check length of search domains
+- name: Check length of search domains
   fail:
     msg: "Search domains exceeded limit of 256 characters"
   when: docker_dns_search_domains | join(' ') | length > 256
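
The nameserver handling in this file is list algebra on facts: merge the configured and system resolvers, dedupe, then either fail or truncate at the three-resolver limit enforced above. A condensed, illustrative merge, assuming the inputs are defined as in the role:

  - name: Merge and dedupe docker nameservers (sketch)
    set_fact:
      docker_dns_servers: >-
        {{ (dns_servers + upstream_dns_servers | default([]))
           | union(system_nameservers.stdout_lines | default([]))
           | unique }}
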
diff --git a/roles/container-engine/docker/tasks/systemd.yml b/roles/container-engine/docker/tasks/systemd.yml
index 7deff7752..57d9b9c5a 100644
--- a/roles/container-engine/docker/tasks/systemd.yml
+++ b/roles/container-engine/docker/tasks/systemd.yml
@@ -10,10 +10,10 @@
     src: http-proxy.conf.j2
     dest: /etc/systemd/system/docker.service.d/http-proxy.conf
     mode: 0644
-  notify: restart docker
+  notify: Restart docker
   when: http_proxy is defined or https_proxy is defined
 
-- name: get systemd version
+- name: Get systemd version
   # noqa command-instead-of-module - systemctl is called intentionally here
   shell: set -o pipefail && systemctl --version | head -n 1 | cut -d " " -f 2
   args:
@@ -29,7 +29,7 @@
     dest: /etc/systemd/system/docker.service
     mode: 0644
   register: docker_service_file
-  notify: restart docker
+  notify: Restart docker
   when:
     - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
     - not is_fedora_coreos
@@ -39,14 +39,14 @@
     src: docker-options.conf.j2
     dest: "/etc/systemd/system/docker.service.d/docker-options.conf"
     mode: 0644
-  notify: restart docker
+  notify: Restart docker
 
 - name: Write docker dns systemd drop-in
   template:
     src: docker-dns.conf.j2
     dest: "/etc/systemd/system/docker.service.d/docker-dns.conf"
     mode: 0644
-  notify: restart docker
+  notify: Restart docker
   when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
 
 - name: Copy docker orphan clean up script to the node
@@ -61,7 +61,7 @@
     src: docker-orphan-cleanup.conf.j2
     dest: "/etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf"
     mode: 0644
-  notify: restart docker
+  notify: Restart docker
   when: docker_orphan_clean_up | bool
 
 - name: Flush handlers
diff --git a/roles/container-engine/gvisor/molecule/default/prepare.yml b/roles/container-engine/gvisor/molecule/default/prepare.yml
index 8f9ef7ddf..3ec360225 100644
--- a/roles/container-engine/gvisor/molecule/default/prepare.yml
+++ b/roles/container-engine/gvisor/molecule/default/prepare.yml
@@ -8,7 +8,8 @@
     - role: adduser
       user: "{{ addusers.kube }}"
   tasks:
-    - include_tasks: "../../../../download/tasks/download_file.yml"
+    - name: Download CNI
+      include_tasks: "../../../../download/tasks/download_file.yml"
       vars:
         download: "{{ download_defaults | combine(downloads.cni) }}"
 
diff --git a/roles/container-engine/gvisor/tasks/main.yml b/roles/container-engine/gvisor/tasks/main.yml
index 41b115220..1a8277b72 100644
--- a/roles/container-engine/gvisor/tasks/main.yml
+++ b/roles/container-engine/gvisor/tasks/main.yml
@@ -1,15 +1,15 @@
 ---
-- name: gVisor | Download runsc binary
+- name: GVisor | Download runsc binary
   include_tasks: "../../../download/tasks/download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.gvisor_runsc) }}"
 
-- name: gVisor | Download containerd-shim-runsc-v1 binary
+- name: GVisor | Download containerd-shim-runsc-v1 binary
   include_tasks: "../../../download/tasks/download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.gvisor_containerd_shim) }}"
 
-- name: gVisor | Copy binaries
+- name: GVisor | Copy binaries
   copy:
     src: "{{ item.src }}"
     dest: "{{ bin_dir }}/{{ item.dest }}"
diff --git a/roles/container-engine/kata-containers/molecule/default/prepare.yml b/roles/container-engine/kata-containers/molecule/default/prepare.yml
index 8a0978f56..9d7019a6d 100644
--- a/roles/container-engine/kata-containers/molecule/default/prepare.yml
+++ b/roles/container-engine/kata-containers/molecule/default/prepare.yml
@@ -8,7 +8,8 @@
     - role: adduser
       user: "{{ addusers.kube }}"
   tasks:
-    - include_tasks: "../../../../download/tasks/download_file.yml"
+    - name: Download CNI
+      include_tasks: "../../../../download/tasks/download_file.yml"
       vars:
         download: "{{ download_defaults | combine(downloads.cni) }}"
 
diff --git a/roles/container-engine/kata-containers/tasks/main.yml b/roles/container-engine/kata-containers/tasks/main.yml
index 9d1bf9126..e61d89f60 100644
--- a/roles/container-engine/kata-containers/tasks/main.yml
+++ b/roles/container-engine/kata-containers/tasks/main.yml
@@ -1,23 +1,23 @@
 ---
-- name: kata-containers | Download kata binary
+- name: Kata-containers | Download kata binary
   include_tasks: "../../../download/tasks/download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.kata_containers) }}"
 
-- name: kata-containers | Copy kata-containers binary
+- name: Kata-containers | Copy kata-containers binary
   unarchive:
     src: "{{ downloads.kata_containers.dest }}"
     dest: "/"
     mode: 0755
     remote_src: yes
 
-- name: kata-containers | Create config directory
+- name: Kata-containers | Create config directory
   file:
     path: "{{ kata_containers_config_dir }}"
     state: directory
     mode: 0755
 
-- name: kata-containers | Set configuration
+- name: Kata-containers | Set configuration
   template:
     src: "{{ item }}.j2"
     dest: "{{ kata_containers_config_dir }}/{{ item }}"
@@ -25,7 +25,7 @@
   with_items:
     - configuration-qemu.toml
 
-- name: kata-containers | Set containerd bin
+- name: Kata-containers | Set containerd bin
   vars:
     shim: "{{ item }}"
   template:
@@ -35,7 +35,7 @@
   with_items:
     - qemu
 
-- name: kata-containers | Load vhost kernel modules
+- name: Kata-containers | Load vhost kernel modules
   community.general.modprobe:
     state: present
     name: "{{ item }}"
@@ -43,7 +43,7 @@
     - vhost_vsock
     - vhost_net
 
-- name: kata-containers | Persist vhost kernel modules
+- name: Kata-containers | Persist vhost kernel modules
   copy:
     dest: /etc/modules-load.d/kubespray-kata-containers.conf
     mode: 0644
diff --git a/roles/container-engine/nerdctl/tasks/main.yml b/roles/container-engine/nerdctl/tasks/main.yml
index ad088391f..e4e4ebd15 100644
--- a/roles/container-engine/nerdctl/tasks/main.yml
+++ b/roles/container-engine/nerdctl/tasks/main.yml
@@ -1,10 +1,10 @@
 ---
-- name: nerdctl | Download nerdctl
+- name: Nerdctl | Download nerdctl
   include_tasks: "../../../download/tasks/download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.nerdctl) }}"
 
-- name: nerdctl | Copy nerdctl binary from download dir
+- name: Nerdctl | Copy nerdctl binary from download dir
   copy:
     src: "{{ local_release_dir }}/nerdctl"
     dest: "{{ bin_dir }}/nerdctl"
@@ -17,7 +17,7 @@
     - Get nerdctl completion
     - Install nerdctl completion
 
-- name: nerdctl | Create configuration dir
+- name: Nerdctl | Create configuration dir
   file:
     path: /etc/nerdctl
     state: directory
@@ -26,7 +26,7 @@
     group: root
   become: true
 
-- name: nerdctl | Install nerdctl configuration
+- name: Nerdctl | Install nerdctl configuration
   template:
     src: nerdctl.toml.j2
     dest: /etc/nerdctl/nerdctl.toml
diff --git a/roles/container-engine/runc/tasks/main.yml b/roles/container-engine/runc/tasks/main.yml
index 7a8e336c2..542a447d5 100644
--- a/roles/container-engine/runc/tasks/main.yml
+++ b/roles/container-engine/runc/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: runc | check if fedora coreos
+- name: Runc | check if fedora coreos
   stat:
     path: /run/ostree-booted
     get_attributes: no
@@ -7,18 +7,18 @@
     get_mime: no
   register: ostree
 
-- name: runc | set is_ostree
+- name: Runc | set is_ostree
   set_fact:
     is_ostree: "{{ ostree.stat.exists }}"
 
-- name: runc | Uninstall runc package managed by package manager
+- name: Runc | Uninstall runc package managed by package manager
   package:
     name: "{{ runc_package_name }}"
     state: absent
   when:
     - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
 
-- name: runc | Download runc binary
+- name: Runc | Download runc binary
   include_tasks: "../../../download/tasks/download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.runc) }}"
@@ -30,7 +30,7 @@
     mode: 0755
     remote_src: true
 
-- name: runc | Remove orphaned binary
+- name: Runc | Remove orphaned binary
   file:
     path: /usr/bin/runc
     state: absent
diff --git a/roles/container-engine/skopeo/tasks/main.yml b/roles/container-engine/skopeo/tasks/main.yml
index 033ae629f..cef0424cd 100644
--- a/roles/container-engine/skopeo/tasks/main.yml
+++ b/roles/container-engine/skopeo/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: skopeo | check if fedora coreos
+- name: Skopeo | check if fedora coreos
   stat:
     path: /run/ostree-booted
     get_attributes: no
@@ -7,11 +7,11 @@
     get_mime: no
   register: ostree
 
-- name: skopeo | set is_ostree
+- name: Skopeo | set is_ostree
   set_fact:
     is_ostree: "{{ ostree.stat.exists }}"
 
-- name: skopeo | Uninstall skopeo package managed by package manager
+- name: Skopeo | Uninstall skopeo package managed by package manager
   package:
     name: skopeo
     state: absent
@@ -19,7 +19,7 @@
     - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
   ignore_errors: true  # noqa ignore-errors
 
-- name: skopeo | Download skopeo binary
+- name: Skopeo | Download skopeo binary
   include_tasks: "../../../download/tasks/download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.skopeo) }}"
diff --git a/roles/container-engine/validate-container-engine/tasks/main.yml b/roles/container-engine/validate-container-engine/tasks/main.yml
index fdd60e0e2..2221eb7e0 100644
--- a/roles/container-engine/validate-container-engine/tasks/main.yml
+++ b/roles/container-engine/validate-container-engine/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: validate-container-engine | check if fedora coreos
+- name: Validate-container-engine | check if fedora coreos
   stat:
     path: /run/ostree-booted
     get_attributes: no
@@ -9,7 +9,7 @@
   tags:
     - facts
 
-- name: validate-container-engine | set is_ostree
+- name: Validate-container-engine | set is_ostree
   set_fact:
     is_ostree: "{{ ostree.stat.exists }}"
   tags:
diff --git a/roles/container-engine/youki/molecule/default/prepare.yml b/roles/container-engine/youki/molecule/default/prepare.yml
index e9486865f..119f58add 100644
--- a/roles/container-engine/youki/molecule/default/prepare.yml
+++ b/roles/container-engine/youki/molecule/default/prepare.yml
@@ -8,7 +8,8 @@
     - role: adduser
       user: "{{ addusers.kube }}"
   tasks:
-    - include_tasks: "../../../../download/tasks/download_file.yml"
+    - name: Download CNI
+      include_tasks: "../../../../download/tasks/download_file.yml"
       vars:
         download: "{{ download_defaults | combine(downloads.cni) }}"
 
diff --git a/roles/container-engine/youki/tasks/main.yml b/roles/container-engine/youki/tasks/main.yml
index 1095c3d2e..d617963df 100644
--- a/roles/container-engine/youki/tasks/main.yml
+++ b/roles/container-engine/youki/tasks/main.yml
@@ -1,10 +1,10 @@
 ---
-- name: youki | Download youki
+- name: Youki | Download youki
   include_tasks: "../../../download/tasks/download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.youki) }}"
 
-- name: youki | Copy youki binary from download dir
+- name: Youki | Copy youki binary from download dir
   copy:
     src: "{{ local_release_dir }}/youki_v{{ youki_version | regex_replace('\\.', '_') }}_linux/youki-v{{ youki_version }}/youki"
     dest: "{{ youki_bin_dir }}/youki"
diff --git a/roles/download/tasks/check_pull_required.yml b/roles/download/tasks/check_pull_required.yml
index c0681a7ec..e5ae1dcf3 100644
--- a/roles/download/tasks/check_pull_required.yml
+++ b/roles/download/tasks/check_pull_required.yml
@@ -1,20 +1,20 @@
 ---
 # The image_info_command depends on the Container Runtime and will output something like the following:
 # nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
-- name: check_pull_required |  Generate a list of information about the images on a node  # noqa command-instead-of-shell - image_info_command contains a pipe, therefore requiring shell
+- name: Check_pull_required | Generate a list of information about the images on a node  # noqa command-instead-of-shell - image_info_command contains a pipe, therefore requiring shell
   shell: "{{ image_info_command }}"
   register: docker_images
   changed_when: false
   check_mode: no
   when: not download_always_pull
 
-- name: check_pull_required | Set pull_required if the desired image is not yet loaded
+- name: Check_pull_required | Set pull_required if the desired image is not yet loaded
   set_fact:
     pull_required: >-
       {%- if image_reponame | regex_replace('^docker\.io/(library/)?', '') in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%}
   when: not download_always_pull
 
-- name: check_pull_required | Check that the local digest sha256 corresponds to the given image tag
+- name: Check_pull_required | Check that the local digest sha256 corresponds to the given image tag
   assert:
     that: "{{ download.repo }}:{{ download.tag }} in docker_images.stdout.split(',')"
   when:
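
The `pull_required` computation hinges on one normalization: image references reported by the runtime may or may not carry the implicit `docker.io/` (and `library/`) prefix, so it is stripped before the membership test. The same test sketched as a single boolean expression; illustrative, not the role's exact wording:

  - name: Decide whether a pull is needed (sketch)
    set_fact:
      pull_required: >-
        {{ image_reponame | regex_replace('^docker\.io/(library/)?', '')
           not in docker_images.stdout.split(',') }}
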
diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml
index 426b00804..b8a320a4b 100644
--- a/roles/download/tasks/download_container.yml
+++ b/roles/download/tasks/download_container.yml
@@ -1,6 +1,6 @@
 ---
 - block:
-    - name: set default values for flag variables
+    - name: Set default values for flag variables
       set_fact:
         image_is_cached: false
         image_changed: false
@@ -8,12 +8,12 @@
       tags:
         - facts
 
-    - name: download_container | Set a few facts
+    - name: Download_container | Set a few facts
       import_tasks: set_container_facts.yml
       tags:
         - facts
 
-    - name: download_container | Prepare container download
+    - name: Download_container | Prepare container download
       include_tasks: check_pull_required.yml
       when:
         - not download_always_pull
@@ -21,7 +21,7 @@
     - debug:  # noqa name[missing]
         msg: "Pull {{ image_reponame }} required is: {{ pull_required }}"
 
-    - name: download_container | Determine if image is in cache
+    - name: Download_container | Determine if image is in cache
       stat:
         path: "{{ image_path_cached }}"
         get_attributes: no
@@ -36,7 +36,7 @@
       when:
         - download_force_cache
 
-    - name: download_container | Set fact indicating if image is in cache
+    - name: Download_container | Set fact indicating if image is in cache
       set_fact:
         image_is_cached: "{{ cache_image.stat.exists }}"
       tags:
@@ -52,7 +52,7 @@
         - download_force_cache
         - not download_run_once
 
-    - name: download_container | Download image if required
+    - name: Download_container | Download image if required
       command: "{{ image_pull_command_on_localhost if download_localhost else image_pull_command }} {{ image_reponame }}"
       delegate_to: "{{ download_delegate if download_run_once else inventory_hostname }}"
       delegate_facts: yes
@@ -67,7 +67,7 @@
         - pull_required or download_run_once
         - not image_is_cached
 
-    - name: download_container | Save and compress image
+    - name: Download_container | Save and compress image
       shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}"  # noqa command-instead-of-shell - image_save_command_on_localhost contains a pipe, therefore requires shell
       delegate_to: "{{ download_delegate }}"
       delegate_facts: no
@@ -79,7 +79,7 @@
         - not image_is_cached
         - download_run_once
 
-    - name: download_container | Copy image to ansible host cache
+    - name: Download_container | Copy image to ansible host cache
       ansible.posix.synchronize:
         src: "{{ image_path_final }}"
         dest: "{{ image_path_cached }}"
@@ -91,7 +91,7 @@
         - not download_localhost
         - download_delegate == inventory_hostname
 
-    - name: download_container | Upload image to node if it is cached
+    - name: Download_container | Upload image to node if it is cached
       ansible.posix.synchronize:
         src: "{{ image_path_cached }}"
         dest: "{{ image_path_final }}"
@@ -107,7 +107,7 @@
         - pull_required
         - download_force_cache
 
-    - name: download_container | Load image into the local container registry
+    - name: Download_container | Load image into the local container registry
       shell: "{{ image_load_command }}"  # noqa command-instead-of-shell - image_load_command uses pipes, therefore requires shell
       register: container_load_status
       failed_when: container_load_status is failed
@@ -115,7 +115,7 @@
         - pull_required
         - download_force_cache
 
-    - name: download_container | Remove container image from cache
+    - name: Download_container | Remove container image from cache
       file:
         state: absent
         path: "{{ image_path_final }}"
diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml
index 0db1eec68..5ab10b8a8 100644
--- a/roles/download/tasks/download_file.yml
+++ b/roles/download/tasks/download_file.yml
@@ -1,21 +1,22 @@
 ---
-- block:
-  - name: prep_download | Set a few facts
+- name: "Download_file | download {{ download.dest }}"
+  block:
+  - name: Prep_download | Set a few facts
     set_fact:
       download_force_cache: "{{ true if download_run_once else download_force_cache }}"
 
-  - name: download_file | Starting download of file
+  - name: Download_file | Starting download of file
     debug:
       msg: "{{ download.url }}"
     run_once: "{{ download_run_once }}"
 
-  - name: download_file | Set pathname of cached file
+  - name: Download_file | Set pathname of cached file
     set_fact:
       file_path_cached: "{{ download_cache_dir }}/{{ download.dest | basename }}"
     tags:
     - facts
 
-  - name: download_file | Create dest directory on node
+  - name: Download_file | Create dest directory on node
     file:
       path: "{{ download.dest | dirname }}"
       owner: "{{ download.owner | default(omit) }}"
@@ -23,7 +24,7 @@
       state: directory
       recurse: yes
 
-  - name: download_file | Create local cache directory
+  - name: Download_file | Create local cache directory
     file:
       path: "{{ file_path_cached | dirname }}"
       state: directory
@@ -38,7 +39,7 @@
     tags:
     - localhost
 
-  - name: download_file | Create cache directory on download_delegate host
+  - name: Download_file | Create cache directory on download_delegate host
     file:
       path: "{{ file_path_cached | dirname }}"
       state: directory
@@ -52,7 +53,7 @@
 
   # We check a number of mirrors that may hold the file and pick a working one at random
   # This task will avoid logging it's parameters to not leak environment passwords in the log
-  - name: download_file | Validate mirrors
+  - name: Download_file | Validate mirrors
     uri:
       url: "{{ mirror }}"
       method: HEAD
@@ -75,14 +76,14 @@
     ignore_errors: true
 
   # Ansible 2.9 requires we convert a generator to a list
-  - name: download_file | Get the list of working mirrors
+  - name: Download_file | Get the list of working mirrors
     set_fact:
       valid_mirror_urls: "{{ uri_result.results | selectattr('failed', 'eq', False) | map(attribute='mirror') | list }}"
     delegate_to: "{{ download_delegate if download_force_cache else inventory_hostname }}"
 
   # This must always be called, to check if the checksum matches. On no-match the file is re-downloaded.
   # This task will avoid logging it's parameters to not leak environment passwords in the log
-  - name: download_file | Download item
+  - name: Download_file | Download item
     get_url:
       url: "{{ valid_mirror_urls | random }}"
       dest: "{{ file_path_cached if download_force_cache else download.dest }}"
@@ -104,7 +105,7 @@
     environment: "{{ proxy_env }}"
     no_log: "{{ not (unsafe_show_logs | bool) }}"
 
-  - name: download_file | Copy file back to ansible host file cache
+  - name: Download_file | Copy file back to ansible host file cache
     ansible.posix.synchronize:
       src: "{{ file_path_cached }}"
       dest: "{{ file_path_cached }}"
@@ -115,7 +116,7 @@
     - not download_localhost
     - download_delegate == inventory_hostname
 
-  - name: download_file | Copy file from cache to nodes, if it is available
+  - name: Download_file | Copy file from cache to nodes, if it is available
     ansible.posix.synchronize:
       src: "{{ file_path_cached }}"
       dest: "{{ download.dest }}"
@@ -128,7 +129,7 @@
     when:
     - download_force_cache
 
-  - name: download_file | Set mode and owner
+  - name: Download_file | Set mode and owner
     file:
       path: "{{ download.dest }}"
       mode: "{{ download.mode | default(omit) }}"
@@ -136,7 +137,7 @@
     when:
     - download_force_cache
 
-  - name: "download_file | Extract file archives"
+  - name: "Download_file | Extract file archives"
     include_tasks: "extract_file.yml"
 
   tags:
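
Mirror handling is two steps: HEAD-probe every candidate with `uri` (errors ignored), then keep the survivors and draw one at random for the actual `get_url`. Condensed sketch using the role's variable names:

  - name: Keep only mirrors whose HEAD probe succeeded (sketch)
    set_fact:
      valid_mirror_urls: >-
        {{ uri_result.results | selectattr('failed', 'eq', False)
           | map(attribute='mirror') | list }}

  - name: Download from one working mirror at random (sketch)
    get_url:
      url: "{{ valid_mirror_urls | random }}"
      dest: "{{ download.dest }}"
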
diff --git a/roles/download/tasks/extract_file.yml b/roles/download/tasks/extract_file.yml
index 94f240edb..59d0531f6 100644
--- a/roles/download/tasks/extract_file.yml
+++ b/roles/download/tasks/extract_file.yml
@@ -1,5 +1,5 @@
 ---
-- name: extract_file | Unpacking archive
+- name: Extract_file | Unpacking archive
   unarchive:
     src: "{{ download.dest }}"
     dest: "{{ download.dest | dirname }}"
diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml
index 92313a58a..3309ab88e 100644
--- a/roles/download/tasks/main.yml
+++ b/roles/download/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: download | Prepare working directories and variables
+- name: Download | Prepare working directories and variables
   import_tasks: prep_download.yml
   when:
     - not skip_downloads | default(false)
@@ -7,7 +7,7 @@
     - download
     - upload
 
-- name: download | Get kubeadm binary and list of required images
+- name: Download | Get kubeadm binary and list of required images
   include_tasks: prep_kubeadm_images.yml
   when:
     - not skip_downloads | default(false)
@@ -16,7 +16,7 @@
     - download
     - upload
 
-- name: download | Download files / images
+- name: Download | Download files / images
   include_tasks: "{{ include_file }}"
   loop: "{{ downloads | combine(kubeadm_images) | dict2items }}"
   vars:
diff --git a/roles/download/tasks/prep_download.yml b/roles/download/tasks/prep_download.yml
index 0554d1b29..4c737e8e6 100644
--- a/roles/download/tasks/prep_download.yml
+++ b/roles/download/tasks/prep_download.yml
@@ -1,11 +1,11 @@
 ---
-- name: prep_download | Set a few facts
+- name: Prep_download | Set a few facts
   set_fact:
     download_force_cache: "{{ true if download_run_once else download_force_cache }}"
   tags:
     - facts
 
-- name: prep_download | On localhost, check if passwordless root is possible
+- name: Prep_download | On localhost, check if passwordless root is possible
   command: "true"
   delegate_to: localhost
   connection: local
@@ -20,7 +20,7 @@
     - localhost
     - asserts
 
-- name: prep_download | On localhost, check if user has access to the container runtime without using sudo
+- name: Prep_download | On localhost, check if user has access to the container runtime without using sudo
   shell: "{{ image_info_command_on_localhost }}"  # noqa command-instead-of-shell - image_info_command_on_localhost contains pipe, therefore requires shell
   delegate_to: localhost
   connection: local
@@ -35,7 +35,7 @@
     - localhost
     - asserts
 
-- name: prep_download | Parse the outputs of the previous commands
+- name: Prep_download | Parse the outputs of the previous commands
   set_fact:
     user_in_docker_group: "{{ not test_docker.failed }}"
     user_can_become_root: "{{ not test_become.failed }}"
@@ -45,7 +45,7 @@
     - localhost
     - asserts
 
-- name: prep_download | Check that local user is in group or can become root
+- name: Prep_download | Check that local user is in group or can become root
   assert:
     that: "user_in_docker_group or user_can_become_root"
     msg: >-
@@ -56,7 +56,7 @@
     - localhost
     - asserts
 
-- name: prep_download | Register docker images info
+- name: Prep_download | Register docker images info
   shell: "{{ image_info_command }}"  # noqa command-instead-of-shell - image_info_command contains pipe therefore requires shell
   no_log: "{{ not (unsafe_show_logs | bool) }}"
   register: docker_images
@@ -65,7 +65,7 @@
   check_mode: no
   when: download_container
 
-- name: prep_download | Create staging directory on remote node
+- name: Prep_download | Create staging directory on remote node
   file:
     path: "{{ local_release_dir }}/images"
     state: directory
@@ -75,7 +75,7 @@
   when:
     - ansible_os_family not in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
 
-- name: prep_download | Create local cache for files and images on control node
+- name: Prep_download | Create local cache for files and images on control node
   file:
     path: "{{ download_cache_dir }}/images"
     state: directory
diff --git a/roles/download/tasks/prep_kubeadm_images.yml b/roles/download/tasks/prep_kubeadm_images.yml
index e1dc3af4c..fdfed1d08 100644
--- a/roles/download/tasks/prep_kubeadm_images.yml
+++ b/roles/download/tasks/prep_kubeadm_images.yml
@@ -1,12 +1,12 @@
 ---
-- name: prep_kubeadm_images | Check kubeadm version matches kubernetes version
+- name: Prep_kubeadm_images | Check kubeadm version matches kubernetes version
   fail:
     msg: "Kubeadm version {{ kubeadm_version }} do not matches kubernetes {{ kube_version }}"
   when:
     - not skip_downloads | default(false)
     - not kubeadm_version == downloads.kubeadm.version
 
-- name: prep_kubeadm_images | Download kubeadm binary
+- name: Prep_kubeadm_images | Download kubeadm binary
   include_tasks: "download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.kubeadm) }}"
@@ -14,7 +14,7 @@
     - not skip_downloads | default(false)
     - downloads.kubeadm.enabled
 
-- name: prep_kubeadm_images | Create kubeadm config
+- name: Prep_kubeadm_images | Create kubeadm config
   template:
     src: "kubeadm-images.yaml.j2"
     dest: "{{ kube_config_dir }}/kubeadm-images.yaml"
@@ -22,21 +22,21 @@
   when:
     - not skip_kubeadm_images | default(false)
 
-- name: prep_kubeadm_images | Copy kubeadm binary from download dir to system path
+- name: Prep_kubeadm_images | Copy kubeadm binary from download dir to system path
   copy:
     src: "{{ downloads.kubeadm.dest }}"
     dest: "{{ bin_dir }}/kubeadm"
     mode: 0755
     remote_src: true
 
-- name: prep_kubeadm_images | Set kubeadm binary permissions
+- name: Prep_kubeadm_images | Set kubeadm binary permissions
   file:
     path: "{{ bin_dir }}/kubeadm"
     mode: "0755"
     state: file
 
-- name: prep_kubeadm_images | Generate list of required images
-  shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns | pause'"
+- name: Prep_kubeadm_images | Generate list of required images
+  shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns|pause'"
   args:
     executable: /bin/bash
   register: kubeadm_images_raw
@@ -45,7 +45,7 @@
   when:
     - not skip_kubeadm_images | default(false)
 
-- name: prep_kubeadm_images | Parse list of images
+- name: Prep_kubeadm_images | Parse list of images
   vars:
     kubeadm_images_list: "{{ kubeadm_images_raw.stdout_lines }}"
   set_fact:
@@ -63,7 +63,7 @@
   when:
     - not skip_kubeadm_images | default(false)
 
-- name: prep_kubeadm_images | Convert list of images to dict for later use
+- name: Prep_kubeadm_images | Convert list of images to dict for later use
   set_fact:
     kubeadm_images: "{{ kubeadm_images_cooked.results | map(attribute='ansible_facts.kubeadm_image') | list | items2dict }}"
   run_once: true
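
Beyond the rename, this hunk fixes a real filter bug: with the spaces, `grep -Ev 'coredns | pause'` only drops lines containing 'coredns ' or ' pause' (spaces included), so the coredns and pause images were usually not excluded at all. Without the spaces the alternation matches either name. The corrected pipeline, reflowed here for readability:

  - name: Prep_kubeadm_images | Generate list of required images
    shell: >-
      set -o pipefail &&
      {{ bin_dir }}/kubeadm config images list
      --config={{ kube_config_dir }}/kubeadm-images.yaml
      | grep -Ev 'coredns|pause'
    args:
      executable: /bin/bash
    register: kubeadm_images_raw
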
diff --git a/roles/download/tasks/set_container_facts.yml b/roles/download/tasks/set_container_facts.yml
index 9d36c2484..5b93f2953 100644
--- a/roles/download/tasks/set_container_facts.yml
+++ b/roles/download/tasks/set_container_facts.yml
@@ -1,22 +1,22 @@
 ---
-- name: set_container_facts | Display the name of the image being processed
+- name: Set_container_facts | Display the name of the image being processed
   debug:
     msg: "{{ download.repo }}"
 
-- name: set_container_facts | Set if containers should be pulled by digest
+- name: Set_container_facts | Set if containers should be pulled by digest
   set_fact:
     pull_by_digest: "{{ download.sha256 is defined and download.sha256 }}"
 
-- name: set_container_facts | Define by what name to pull the image
+- name: Set_container_facts | Define by what name to pull the image
   set_fact:
     image_reponame: >-
       {%- if pull_by_digest %}{{ download.repo }}@sha256:{{ download.sha256 }}{%- else -%}{{ download.repo }}:{{ download.tag }}{%- endif -%}
 
-- name: set_container_facts | Define file name of image
+- name: Set_container_facts | Define file name of image
   set_fact:
     image_filename: "{{ image_reponame | regex_replace('/|\0|:', '_') }}.tar"
 
-- name: set_container_facts | Define path of image
+- name: Set_container_facts | Define path of image
   set_fact:
     image_path_cached: "{{ download_cache_dir }}/images/{{ image_filename }}"
     image_path_final: "{{ local_release_dir }}/images/{{ image_filename }}"
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
index ccf8f8f64..f09789c25 100644
--- a/roles/etcd/handlers/main.yml
+++ b/roles/etcd/handlers/main.yml
@@ -1,39 +1,40 @@
 ---
-- name: restart etcd
+- name: Restart etcd
   command: /bin/true
   notify:
     - Backup etcd data
-    - etcd | reload systemd
-    - reload etcd
-    - wait for etcd up
+    - Etcd | reload systemd
+    - Reload etcd
+    - Wait for etcd up
     - Cleanup etcd backups
 
-- name: restart etcd-events
+- name: Restart etcd-events
   command: /bin/true
   notify:
-    - etcd | reload systemd
-    - reload etcd-events
-    - wait for etcd-events up
+    - Etcd | reload systemd
+    - Reload etcd-events
+    - Wait for etcd-events up
 
-- import_tasks: backup.yml
+- name: Backup etcd
+  import_tasks: backup.yml
 
-- name: etcd | reload systemd
+- name: Etcd | reload systemd
   systemd:
     daemon_reload: true
 
-- name: reload etcd
+- name: Reload etcd
   service:
     name: etcd
     state: restarted
   when: is_etcd_master
 
-- name: reload etcd-events
+- name: Reload etcd-events
   service:
     name: etcd-events
     state: restarted
   when: is_etcd_master
 
-- name: wait for etcd up
+- name: Wait for etcd up
   uri:
     url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
     validate_certs: no
@@ -44,9 +45,10 @@
   retries: 60
   delay: 1
 
-- import_tasks: backup_cleanup.yml
+- name: Cleanup etcd backups
+  import_tasks: backup_cleanup.yml
 
-- name: wait for etcd-events up
+- name: Wait for etcd-events up
   uri:
     url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health"
     validate_certs: no
@@ -57,6 +59,6 @@
   retries: 60
   delay: 1
 
-- name: set etcd_secret_changed
+- name: Set etcd_secret_changed
   set_fact:
     etcd_secret_changed: true
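
`Restart etcd` is the same /bin/true fan-out seen in the docker role: one notification queues backup, systemd reload, service restart, health wait, and backup cleanup in order. The health wait is a standard uri-until poll; in isolation it looks roughly like this (client-certificate options omitted for brevity, endpoint illustrative):

  - name: Wait for etcd to report healthy (sketch)
    uri:
      url: "https://127.0.0.1:2379/health"
      validate_certs: no
    register: result
    until: result.status is defined and result.status == 200
    retries: 60
    delay: 1
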
diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml
index fe53dd535..f1d6a4872 100644
--- a/roles/etcd/tasks/configure.yml
+++ b/roles/etcd/tasks/configure.yml
@@ -41,7 +41,8 @@
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
     ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}"
 
-- include_tasks: refresh_config.yml
+- name: Configure | Refresh etcd config
+  include_tasks: refresh_config.yml
   when: is_etcd_master
 
 - name: Configure | Copy etcd.service systemd file
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index cd66de7eb..2ce3e14cf 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -56,7 +56,7 @@
   run_once: yes
   delegate_to: "{{ groups['etcd'][0] }}"
   when: gen_certs | default(false)
-  notify: set etcd_secret_changed
+  notify: Set etcd_secret_changed
 
 - name: Gen_certs | run cert generation script for all clients
   command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
@@ -73,7 +73,7 @@
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
     - gen_certs | default(false)
-  notify: set etcd_secret_changed
+  notify: Set etcd_secret_changed
 
 - name: Gen_certs | Gather etcd member/admin and kube_control_plane client certs from first etcd node
   slurp:
@@ -97,7 +97,7 @@
     - inventory_hostname in groups['etcd']
     - sync_certs | default(false)
     - inventory_hostname != groups['etcd'][0]
-  notify: set etcd_secret_changed
+  notify: Set etcd_secret_changed
 
 - name: Gen_certs | Write etcd member/admin and kube_control_plane client certs to other etcd nodes
   copy:
@@ -129,7 +129,7 @@
     - inventory_hostname != groups['etcd'][0]
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
-  notify: set etcd_secret_changed
+  notify: Set etcd_secret_changed
 
 - name: Gen_certs | Write node certs to other etcd nodes
   copy:
@@ -147,12 +147,14 @@
   loop_control:
     label: "{{ item.item }}"
 
-- include_tasks: gen_nodes_certs_script.yml
+- name: Gen_certs | Generate etcd certs
+  include_tasks: gen_nodes_certs_script.yml
   when:
     - inventory_hostname in groups['kube_control_plane'] and
         sync_certs | default(false) and inventory_hostname not in groups['etcd']
 
-- include_tasks: gen_nodes_certs_script.yml
+- name: Gen_certs | Generate etcd certs on nodes if needed
+  include_tasks: gen_nodes_certs_script.yml
   when:
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
diff --git a/roles/etcd/tasks/install_docker.yml b/roles/etcd/tasks/install_docker.yml
index 4c0923b6e..5ec124308 100644
--- a/roles/etcd/tasks/install_docker.yml
+++ b/roles/etcd/tasks/install_docker.yml
@@ -1,5 +1,7 @@
 ---
-- import_tasks: install_etcdctl_docker.yml
+
+- name: Install etcdctl from docker
+  import_tasks: install_etcdctl_docker.yml
   when: etcd_cluster_setup
 
 - name: Get currently-deployed etcd version
@@ -14,14 +16,14 @@
 
 - name: Restart etcd if necessary
   command: /bin/true
-  notify: restart etcd
+  notify: Restart etcd
   when:
     - etcd_cluster_setup
     - etcd_image_tag not in etcd_current_docker_image.stdout | default('')
 
 - name: Restart etcd-events if necessary
   command: /bin/true
-  notify: restart etcd-events
+  notify: Restart etcd-events
   when:
     - etcd_events_cluster_setup
     - etcd_image_tag not in etcd_events_current_docker_image.stdout | default('')
diff --git a/roles/etcd/tasks/install_host.yml b/roles/etcd/tasks/install_host.yml
index 6abea352b..4c2b3de8b 100644
--- a/roles/etcd/tasks/install_host.yml
+++ b/roles/etcd/tasks/install_host.yml
@@ -8,19 +8,19 @@
 
 - name: Restart etcd if necessary
   command: /bin/true
-  notify: restart etcd
+  notify: Restart etcd
   when:
     - etcd_cluster_setup
     - etcd_version.lstrip('v') not in etcd_current_host_version.stdout | default('')
 
 - name: Restart etcd-events if necessary
   command: /bin/true
-  notify: restart etcd-events
+  notify: Restart etcd-events
   when:
     - etcd_events_cluster_setup
     - etcd_version.lstrip('v') not in etcd_current_host_version.stdout | default('')
 
-- name: install | Download etcd and etcdctl
+- name: Install | Download etcd and etcdctl
   include_tasks: "../../download/tasks/download_file.yml"
   vars:
     download: "{{ download_defaults | combine(downloads.etcd) }}"
@@ -29,7 +29,7 @@
     - never
     - etcd
 
-- name: install | Copy etcd and etcdctl binary from download dir
+- name: Install | Copy etcd and etcdctl binary from download dir
   copy:
     src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}"
     dest: "{{ bin_dir }}/{{ item }}"
diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml
index d627b2663..0fad331e3 100644
--- a/roles/etcd/tasks/join_etcd-events_member.yml
+++ b/roles/etcd/tasks/join_etcd-events_member.yml
@@ -12,7 +12,8 @@
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
     ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}"
 
-- include_tasks: refresh_config.yml
+- name: Join Member | Refresh etcd config
+  include_tasks: refresh_config.yml
   vars:
     # noqa: jinja[spacing]
     etcd_events_peer_addresses: >-
diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml
index b60a9df9a..ee77d4b26 100644
--- a/roles/etcd/tasks/join_etcd_member.yml
+++ b/roles/etcd/tasks/join_etcd_member.yml
@@ -13,7 +13,8 @@
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
 
-- include_tasks: refresh_config.yml
+- name: Join Member | Refresh etcd config
+  include_tasks: refresh_config.yml
   vars:
     # noqa: jinja[spacing]
     etcd_peer_addresses: >-
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index f3d304bb8..53afecbb8 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -1,23 +1,27 @@
 ---
-- include_tasks: check_certs.yml
+- name: Check etcd certs
+  include_tasks: check_certs.yml
   when: cert_management == "script"
   tags:
     - etcd-secrets
     - facts
 
-- include_tasks: "gen_certs_script.yml"
+- name: Generate etcd certs
+  include_tasks: "gen_certs_script.yml"
   when:
     - cert_management | d('script') == "script"
   tags:
     - etcd-secrets
 
-- include_tasks: upd_ca_trust.yml
+- name: Trust etcd CA
+  include_tasks: upd_ca_trust.yml
   when:
     - inventory_hostname in groups['etcd'] | union(groups['kube_control_plane']) | unique | sort
   tags:
     - etcd-secrets
 
-- include_tasks: upd_ca_trust.yml
+- name: Trust etcd CA on nodes if needed
+  include_tasks: upd_ca_trust.yml
   when:
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
@@ -49,29 +53,33 @@
     - master
     - network
 
-- include_tasks: "install_{{ etcd_deployment_type }}.yml"
+- name: Install etcd
+  include_tasks: "install_{{ etcd_deployment_type }}.yml"
   when: is_etcd_master
   tags:
     - upgrade
 
-- include_tasks: configure.yml
+- name: Configure etcd
+  include_tasks: configure.yml
   when: is_etcd_master
 
-- include_tasks: refresh_config.yml
+- name: Refresh etcd config
+  include_tasks: refresh_config.yml
   when: is_etcd_master
 
 - name: Restart etcd if certs changed
   command: /bin/true
-  notify: restart etcd
+  notify: Restart etcd
   when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed | default(false)
 
 - name: Restart etcd-events if certs changed
   command: /bin/true
-  notify: restart etcd
+  notify: Restart etcd
   when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed | default(false)
 
 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`
 # state instead of `new`.
-- include_tasks: refresh_config.yml
+- name: Refresh etcd config again for idempotency
+  include_tasks: refresh_config.yml
   when: is_etcd_master
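
Most hunks in this file are the ansible-lint name[missing] fix rather than capitalization: a bare `- include_tasks:` gains an explicit name so `--list-tasks` and run output identify the step. Minimal before/after:

  # before: flagged by ansible-lint (name[missing])
  - include_tasks: refresh_config.yml
    when: is_etcd_master

  # after: the include is a named task like any other
  - name: Refresh etcd config
    include_tasks: refresh_config.yml
    when: is_etcd_master
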
diff --git a/roles/etcd/tasks/refresh_config.yml b/roles/etcd/tasks/refresh_config.yml
index 57010fee1..d5e004532 100644
--- a/roles/etcd/tasks/refresh_config.yml
+++ b/roles/etcd/tasks/refresh_config.yml
@@ -4,7 +4,7 @@
     src: etcd.env.j2
     dest: /etc/etcd.env
     mode: 0640
-  notify: restart etcd
+  notify: Restart etcd
   when: is_etcd_master and etcd_cluster_setup
 
 - name: Refresh config | Create etcd-events config file
@@ -12,5 +12,5 @@
     src: etcd-events.env.j2
     dest: /etc/etcd-events.env
     mode: 0640
-  notify: restart etcd-events
+  notify: Restart etcd-events
   when: is_etcd_master and etcd_events_cluster_setup
diff --git a/roles/etcdctl/tasks/main.yml b/roles/etcdctl/tasks/main.yml
index 3f0a9d2e1..2690cd445 100644
--- a/roles/etcdctl/tasks/main.yml
+++ b/roles/etcdctl/tasks/main.yml
@@ -14,7 +14,8 @@
     get_mime: no
   register: stat_etcdctl
 
-- block:
+- name: Remove old etcd binary
+  block:
   - name: Check version
     command: "{{ bin_dir }}/etcdctl version"
     register: etcdctl_version
@@ -36,7 +37,8 @@
     get_mime: no
   register: stat_etcdctl
 
-- block:
+- name: Copy etcdctl script to host
+  block:
   - name: Copy etcdctl script to host
     shell: "{{ docker_bin_dir }}/docker cp \"$({{ docker_bin_dir }}/docker ps -qf ancestor={{ etcd_image_repo }}:{{ etcd_image_tag }})\":/usr/local/bin/etcdctl {{ etcd_data_dir }}/etcdctl"
     when: container_manager ==  "docker"
diff --git a/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml b/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml
index 528519bee..6bfcc25e4 100644
--- a/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml
+++ b/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml
@@ -1,6 +1,7 @@
 ---
 
-- import_tasks: credentials-check.yml
+- name: OCI Cloud Controller | Check Oracle Cloud credentials
+  import_tasks: credentials-check.yml
 
 - name: "OCI Cloud Controller | Generate Cloud Provider Configuration"
   template:
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index 668f18afd..e4c37d39b 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -59,7 +59,8 @@
     - inventory_hostname == groups['kube_control_plane'][0]
   tags: node-webhook
 
-- include_tasks: oci.yml
+- name: Configure Oracle Cloud provider
+  include_tasks: oci.yml
   tags: oci
   when:
     - cloud_provider is defined
diff --git a/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml b/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml
index 46384d281..669014124 100644
--- a/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml
+++ b/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml
@@ -1,6 +1,6 @@
 ---
 
-- name: crun | Copy runtime class manifest
+- name: Crun | Copy runtime class manifest
   template:
     src: runtimeclass-crun.yml
     dest: "{{ kube_config_dir }}/runtimeclass-crun.yml"
@@ -8,7 +8,7 @@
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
 
-- name: crun | Apply manifests
+- name: Crun | Apply manifests
   kube:
     name: "runtimeclass-crun"
     kubectl: "{{ bin_dir }}/kubectl"
diff --git a/roles/kubernetes-apps/container_runtimes/gvisor/tasks/main.yaml b/roles/kubernetes-apps/container_runtimes/gvisor/tasks/main.yaml
index b5b881e85..90562f229 100644
--- a/roles/kubernetes-apps/container_runtimes/gvisor/tasks/main.yaml
+++ b/roles/kubernetes-apps/container_runtimes/gvisor/tasks/main.yaml
@@ -1,5 +1,5 @@
 ---
-- name: gVisor | Create addon dir
+- name: GVisor | Create addon dir
   file:
     path: "{{ kube_config_dir }}/addons/gvisor"
     owner: root
@@ -7,12 +7,12 @@
     mode: 0755
     recurse: true
 
-- name: gVisor | Templates List
+- name: GVisor | Templates List
   set_fact:
     gvisor_templates:
       - { name: runtimeclass-gvisor, file: runtimeclass-gvisor.yml, type: runtimeclass }
 
-- name: gVisort | Create manifests
+- name: GVisor | Create manifests
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ kube_config_dir }}/addons/gvisor/{{ item.file }}"
@@ -22,7 +22,7 @@
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
 
-- name: gVisor | Apply manifests
+- name: GVisor | Apply manifests
   kube:
     name: "{{ item.item.name }}"
     kubectl: "{{ bin_dir }}/kubectl"
diff --git a/roles/kubernetes-apps/container_runtimes/youki/tasks/main.yaml b/roles/kubernetes-apps/container_runtimes/youki/tasks/main.yaml
index 6da025f04..8ba7c7a99 100644
--- a/roles/kubernetes-apps/container_runtimes/youki/tasks/main.yaml
+++ b/roles/kubernetes-apps/container_runtimes/youki/tasks/main.yaml
@@ -1,6 +1,6 @@
 ---
 
-- name: youki | Copy runtime class manifest
+- name: Youki | Copy runtime class manifest
   template:
     src: runtimeclass-youki.yml
     dest: "{{ kube_config_dir }}/runtimeclass-youki.yml"
@@ -8,7 +8,7 @@
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
 
-- name: youki | Apply manifests
+- name: Youki | Apply manifests
   kube:
     name: "runtimeclass-youki"
     kubectl: "{{ bin_dir }}/kubectl"
diff --git a/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml b/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml
index 67ce86512..a94656f48 100644
--- a/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml
@@ -1,5 +1,6 @@
 ---
-- include_tasks: azure-credential-check.yml
+- name: Azure CSI Driver | Check Azure credentials
+  include_tasks: azure-credential-check.yml
 
 - name: Azure CSI Driver | Write Azure CSI cloud-config
   template:
diff --git a/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml b/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml
index 7d5affe61..47ce6cd89 100644
--- a/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml
@@ -1,5 +1,6 @@
 ---
-- include_tasks: cinder-credential-check.yml
+- name: Cinder CSI Driver | Check Cinder credentials
+  include_tasks: cinder-credential-check.yml
 
 - name: Cinder CSI Driver | Write cacert file
   include_tasks: cinder-write-cacert.yml
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
index 0fe5c49e3..102dd8be0 100644
--- a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
@@ -1,7 +1,8 @@
 ---
-- include_tasks: vsphere-credentials-check.yml
+- name: VSphere CSI Driver | Check vsphere credentials
+  include_tasks: vsphere-credentials-check.yml
 
-- name: vSphere CSI Driver | Generate CSI cloud-config
+- name: VSphere CSI Driver | Generate CSI cloud-config
   template:
     src: "{{ item }}.j2"
     dest: "{{ kube_config_dir }}/{{ item }}"
@@ -10,7 +11,7 @@
     - vsphere-csi-cloud-config
   when: inventory_hostname == groups['kube_control_plane'][0]
 
-- name: vSphere CSI Driver | Generate Manifests
+- name: VSphere CSI Driver | Generate Manifests
   template:
     src: "{{ item }}.j2"
     dest: "{{ kube_config_dir }}/{{ item }}"
@@ -27,7 +28,7 @@
   register: vsphere_csi_manifests
   when: inventory_hostname == groups['kube_control_plane'][0]
 
-- name: vSphere CSI Driver | Apply Manifests
+- name: VSphere CSI Driver | Apply Manifests
   kube:
     kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/{{ item.item }}"
@@ -40,13 +41,13 @@
   loop_control:
     label: "{{ item.item }}"
 
-- name: vSphere CSI Driver | Generate a CSI secret manifest
+- name: VSphere CSI Driver | Generate a CSI secret manifest
   command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n {{ vsphere_csi_namespace }} --dry-run --save-config -o yaml"
   register: vsphere_csi_secret_manifest
   when: inventory_hostname == groups['kube_control_plane'][0]
   no_log: "{{ not (unsafe_show_logs | bool) }}"
 
-- name: vSphere CSI Driver | Apply a CSI secret manifest
+- name: VSphere CSI Driver | Apply a CSI secret manifest
   command:
     cmd: "{{ kubectl }} apply -f -"
     stdin: "{{ vsphere_csi_secret_manifest.stdout }}"
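
Most single-line changes in this patch answer ansible-lint's name[casing] rule, which only checks that the first character of a task name is uppercase; the `Component | Action` prefix convention is otherwise untouched, which is why `vSphere` becomes the slightly awkward `VSphere`. A sketch of the rule's effect (hypothetical task):

# flagged by name[casing]: first character is lowercase
- name: vsphere | Generate cloud-config
  debug:
    msg: example

# fixed: only the first character changes
- name: Vsphere | Generate cloud-config
  debug:
    msg: example
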
diff --git a/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml b/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml
index ac3810c7c..787dbb444 100644
--- a/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml
+++ b/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml
@@ -1,5 +1,6 @@
 ---
-- include_tasks: openstack-credential-check.yml
+- name: External OpenStack Cloud Controller | Check OpenStack credentials
+  include_tasks: openstack-credential-check.yml
   tags: external-openstack
 
 - name: External OpenStack Cloud Controller | Get base64 cacert
diff --git a/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml b/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml
index 9c25c729f..60b8ec83b 100644
--- a/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml
+++ b/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml
@@ -1,5 +1,6 @@
 ---
-- include_tasks: vsphere-credentials-check.yml
+- name: External vSphere Cloud Controller | Check vsphere credentials
+  include_tasks: vsphere-credentials-check.yml
 
 - name: External vSphere Cloud Controller | Generate CPI cloud-config
   template:
diff --git a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
index 70cd7adc3..1d756a0c1 100644
--- a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 
-- name: kube-router | Start Resources
+- name: Kube-router | Start Resources
   kube:
     name: "kube-router"
     kubectl: "{{ bin_dir }}/kubectl"
@@ -11,7 +11,7 @@
   delegate_to: "{{ groups['kube_control_plane'] | first }}"
   run_once: true
 
-- name: kube-router | Wait for kube-router pods to be ready
+- name: Kube-router | Wait for kube-router pods to be ready
   command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"   # noqa ignore-errors
   register: pods_not_ready
   until: pods_not_ready.stdout.find("kube-router")==-1
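
Not every finding is fixed by renaming: intentional patterns keep their inline `# noqa <rule>` comments, as on the wait task above, which waive a single rule on the annotated line. A sketch of the idiom (hypothetical device and register):

# noqa on the task's name line waives no-handler for this task only
- name: Run resize  # noqa no-handler
  command: growpart /dev/sda 1
  when: growpart_needed.changed
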
diff --git a/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml b/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml
index 8663e8a24..e6da2920a 100644
--- a/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml
+++ b/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: check if snapshot namespace exists
+- name: Check if snapshot namespace exists
   register: snapshot_namespace_exists
   kube:
     kubectl: "{{ bin_dir }}/kubectl"
diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml
index 4483038f9..e6197611e 100644
--- a/roles/kubernetes/client/tasks/main.yml
+++ b/roles/kubernetes/client/tasks/main.yml
@@ -100,7 +100,7 @@
   run_once: yes
   when: kubectl_localhost
 
-- name: create helper script kubectl.sh on ansible host
+- name: Create helper script kubectl.sh on ansible host
   copy:
     content: |
       #!/bin/bash
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
index f1c92aeee..f3fd207c4 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
@@ -47,7 +47,7 @@
     timeout: 180
 
 
-- name: check already run
+- name: Check already run
   debug:
     msg: "{{ kubeadm_already_run.stat.exists }}"
 
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
index 4f1ea288d..375008a60 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -10,7 +10,7 @@
     - kube_oidc_auth
     - kube_oidc_ca_cert is defined
 
-- name: kubeadm | Check if kubeadm has already run
+- name: Kubeadm | Check if kubeadm has already run
   stat:
     path: "/var/lib/kubelet/config.yaml"
     get_attributes: no
@@ -18,12 +18,12 @@
     get_mime: no
   register: kubeadm_already_run
 
-- name: kubeadm | Backup kubeadm certs / kubeconfig
+- name: Kubeadm | Backup kubeadm certs / kubeconfig
   import_tasks: kubeadm-backup.yml
   when:
     - kubeadm_already_run.stat.exists
 
-- name: kubeadm | aggregate all SANs
+- name: Kubeadm | aggregate all SANs
   set_fact:
     apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn + sans_kube_vip_address) | unique }}"
   vars:
@@ -69,7 +69,7 @@
   when: kubernetes_audit_webhook | default(false)
 
 # Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint.
-- name: set kubeadm_config_api_fqdn define
+- name: Set kubeadm_config_api_fqdn fact
   set_fact:
     kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name | default('lb-apiserver.kubernetes.local') }}"
   when: loadbalancer_apiserver is defined
@@ -78,27 +78,27 @@
   set_fact:
     kubeadmConfig_api_version: v1beta3
 
-- name: kubeadm | Create kubeadm config
+- name: Kubeadm | Create kubeadm config
   template:
     src: "kubeadm-config.{{ kubeadmConfig_api_version }}.yaml.j2"
     dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
     mode: 0640
 
-- name: kubeadm | Create directory to store admission control configurations
+- name: Kubeadm | Create directory to store admission control configurations
   file:
     path: "{{ kube_config_dir }}/admission-controls"
     state: directory
     mode: 0640
   when: kube_apiserver_admission_control_config_file
 
-- name: kubeadm | Push admission control config file
+- name: Kubeadm | Push admission control config file
   template:
     src: "admission-controls.yaml.j2"
     dest: "{{ kube_config_dir }}/admission-controls/admission-controls.yaml"
     mode: 0640
   when: kube_apiserver_admission_control_config_file
 
-- name: kubeadm | Push admission control config files
+- name: Kubeadm | Push admission control config files
   template:
     src: "{{ item | lower }}.yaml.j2"
     dest: "{{ kube_config_dir }}/admission-controls/{{ item | lower }}.yaml"
@@ -108,15 +108,15 @@
     - item in kube_apiserver_admission_plugins_needs_configuration
   loop: "{{ kube_apiserver_enable_admission_plugins }}"
 
-- name: kubeadm | Check apiserver.crt SANs
+- name: Kubeadm | Check apiserver.crt SANs
   block:
-    - name: kubeadm | Check apiserver.crt SAN IPs
+    - name: Kubeadm | Check apiserver.crt SAN IPs
       command:
         cmd: "openssl x509 -noout -in {{ kube_cert_dir }}/apiserver.crt -checkip {{ item }}"
       loop: "{{ apiserver_ips }}"
       register: apiserver_sans_ip_check
       changed_when: apiserver_sans_ip_check.stdout is not search('does match certificate')
-    - name: kubeadm | Check apiserver.crt SAN hosts
+    - name: Kubeadm | Check apiserver.crt SAN hosts
       command:
         cmd: "openssl x509 -noout -in {{ kube_cert_dir }}/apiserver.crt -checkhost {{ item }}"
       loop: "{{ apiserver_hosts }}"
@@ -129,7 +129,7 @@
     - kubeadm_already_run.stat.exists
     - not kube_external_ca_mode
 
-- name: kubeadm | regenerate apiserver cert 1/2
+- name: Kubeadm | regenerate apiserver cert 1/2
   file:
     state: absent
     path: "{{ kube_cert_dir }}/{{ item }}"
@@ -141,7 +141,7 @@
     - apiserver_sans_ip_check.changed or apiserver_sans_host_check.changed
     - not kube_external_ca_mode
 
-- name: kubeadm | regenerate apiserver cert 2/2
+- name: Kubeadm | regenerate apiserver cert 2/2
   command: >-
     {{ bin_dir }}/kubeadm
     init phase certs apiserver
@@ -151,14 +151,14 @@
     - apiserver_sans_ip_check.changed or apiserver_sans_host_check.changed
     - not kube_external_ca_mode
 
-- name: kubeadm | Create directory to store kubeadm patches
+- name: Kubeadm | Create directory to store kubeadm patches
   file:
     path: "{{ kubeadm_patches.dest_dir }}"
     state: directory
     mode: 0640
   when: kubeadm_patches is defined and kubeadm_patches.enabled
 
-- name: kubeadm | Copy kubeadm patches from inventory files
+- name: Kubeadm | Copy kubeadm patches from inventory files
   copy:
     src: "{{ kubeadm_patches.source_dir }}/"
     dest: "{{ kubeadm_patches.dest_dir }}"
@@ -166,7 +166,7 @@
     mode: 0644
   when: kubeadm_patches is defined and kubeadm_patches.enabled
 
-- name: kubeadm | Initialize first master
+- name: Kubeadm | Initialize first master
   command: >-
     timeout -k {{ kubeadm_init_timeout }} {{ kubeadm_init_timeout }}
     {{ bin_dir }}/kubeadm init
@@ -184,7 +184,7 @@
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
   notify: Master | restart kubelet
 
-- name: set kubeadm certificate key
+- name: Set kubeadm certificate key
   set_fact:
     kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)', '\\1') | first }}"
   with_items: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_init'].stdout_lines | default([]) }}"
@@ -229,17 +229,17 @@
     - podsecuritypolicy_enabled
     - inventory_hostname == first_kube_control_plane
 
-- name: kubeadm | Join other masters
+- name: Kubeadm | Join other masters
   include_tasks: kubeadm-secondary.yml
 
-- name: kubeadm | upgrade kubernetes cluster
+- name: Kubeadm | upgrade kubernetes cluster
   include_tasks: kubeadm-upgrade.yml
   when:
     - upgrade_cluster_setup
     - kubeadm_already_run.stat.exists
 
 # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
-- name: kubeadm | Remove taint for master with node role
+- name: Kubeadm | Remove taint for master with node role
   command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}"
   delegate_to: "{{ first_kube_control_plane }}"
   with_items:
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
index 4a0043ef2..12ab0b934 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
@@ -1,5 +1,5 @@
 ---
-- name: kubeadm | Check api is up
+- name: Kubeadm | Check api is up
   uri:
     url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz"
     validate_certs: false
@@ -9,7 +9,7 @@
   delay: 5
   until: _result.status == 200
 
-- name: kubeadm | Upgrade first master
+- name: Kubeadm | Upgrade first master
   command: >-
     timeout -k 600s 600s
     {{ bin_dir }}/kubeadm
@@ -31,7 +31,7 @@
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
   notify: Master | restart kubelet
 
-- name: kubeadm | Upgrade other masters
+- name: Kubeadm | Upgrade other masters
   command: >-
     timeout -k 600s 600s
     {{ bin_dir }}/kubeadm
@@ -53,7 +53,7 @@
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
   notify: Master | restart kubelet
 
-- name: kubeadm | clean kubectl cache to refresh api types
+- name: Kubeadm | clean kubectl cache to refresh api types
   file:
     path: "{{ item }}"
     state: absent
@@ -62,7 +62,7 @@
     - /root/.kube/http-cache
 
 # FIXME: https://github.com/kubernetes/kubeadm/issues/1318
-- name: kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
+- name: Kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
   command: >-
     {{ kubectl }}
     -n kube-system
diff --git a/roles/kubernetes/control-plane/tasks/main.yml b/roles/kubernetes/control-plane/tasks/main.yml
index 2fab9d57c..1840e3bb6 100644
--- a/roles/kubernetes/control-plane/tasks/main.yml
+++ b/roles/kubernetes/control-plane/tasks/main.yml
@@ -1,5 +1,6 @@
 ---
-- import_tasks: pre-upgrade.yml
+- name: Pre-upgrade control plane
+  import_tasks: pre-upgrade.yml
   tags:
     - k8s-pre-upgrade
 
@@ -23,7 +24,8 @@
     dest: "{{ kube_config_dir }}/kubescheduler-config.yaml"
     mode: 0644
 
-- import_tasks: encrypt-at-rest.yml
+- name: Apply Kubernetes encryption at rest config
+  import_tasks: encrypt-at-rest.yml
   when:
     - kube_encrypt_secret_data
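
Anonymous `import_tasks`/`include_tasks` entries get the same treatment: the keyword moves down a line under a new `name:`, and since both forms accept `name`, `when`, and `tags` at the task level, nothing else in the entry changes. Mirroring the edit just above:

# before: only the file name appears in --list-tasks and run output
- import_tasks: encrypt-at-rest.yml
  when:
    - kube_encrypt_secret_data

# after: a readable label for logs and the linter alike
- name: Apply Kubernetes encryption at rest config
  import_tasks: encrypt-at-rest.yml
  when:
    - kube_encrypt_secret_data
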
 
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index c8b76f019..290eca39d 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -65,14 +65,14 @@
     mode: 0640
   when: not is_kube_master
 
-- name: kubeadm | Create directory to store kubeadm patches
+- name: Kubeadm | Create directory to store kubeadm patches
   file:
     path: "{{ kubeadm_patches.dest_dir }}"
     state: directory
     mode: 0640
   when: kubeadm_patches is defined and kubeadm_patches.enabled
 
-- name: kubeadm | Copy kubeadm patches from inventory files
+- name: Kubeadm | Copy kubeadm patches from inventory files
   copy:
     src: "{{ kubeadm_patches.source_dir }}/"
     dest: "{{ kubeadm_patches.dest_dir }}"
diff --git a/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml b/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml
index 8ff55cf99..c5d603084 100644
--- a/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml
+++ b/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml
@@ -1,82 +1,82 @@
 ---
-- name: check azure_tenant_id value
+- name: Check azure_tenant_id value
   fail:
     msg: "azure_tenant_id is missing"
   when: azure_tenant_id is not defined or not azure_tenant_id
 
-- name: check azure_subscription_id value
+- name: Check azure_subscription_id value
   fail:
     msg: "azure_subscription_id is missing"
   when: azure_subscription_id is not defined or not azure_subscription_id
 
-- name: check azure_aad_client_id value
+- name: Check azure_aad_client_id value
   fail:
     msg: "azure_aad_client_id is missing"
   when: azure_aad_client_id is not defined or not azure_aad_client_id
 
-- name: check azure_aad_client_secret value
+- name: Check azure_aad_client_secret value
   fail:
     msg: "azure_aad_client_secret is missing"
   when: azure_aad_client_secret is not defined or not azure_aad_client_secret
 
-- name: check azure_resource_group value
+- name: Check azure_resource_group value
   fail:
     msg: "azure_resource_group is missing"
   when: azure_resource_group is not defined or not azure_resource_group
 
-- name: check azure_location value
+- name: Check azure_location value
   fail:
     msg: "azure_location is missing"
   when: azure_location is not defined or not azure_location
 
-- name: check azure_subnet_name value
+- name: Check azure_subnet_name value
   fail:
     msg: "azure_subnet_name is missing"
   when: azure_subnet_name is not defined or not azure_subnet_name
 
-- name: check azure_security_group_name value
+- name: Check azure_security_group_name value
   fail:
     msg: "azure_security_group_name is missing"
   when: azure_security_group_name is not defined or not azure_security_group_name
 
-- name: check azure_vnet_name value
+- name: Check azure_vnet_name value
   fail:
     msg: "azure_vnet_name is missing"
   when: azure_vnet_name is not defined or not azure_vnet_name
 
-- name: check azure_vnet_resource_group value
+- name: Check azure_vnet_resource_group value
   fail:
     msg: "azure_vnet_resource_group is missing"
   when: azure_vnet_resource_group is not defined or not azure_vnet_resource_group
 
-- name: check azure_route_table_name value
+- name: Check azure_route_table_name value
   fail:
     msg: "azure_route_table_name is missing"
   when: azure_route_table_name is not defined or not azure_route_table_name
 
-- name: check azure_loadbalancer_sku value
+- name: Check azure_loadbalancer_sku value
   fail:
     msg: "azure_loadbalancer_sku has an invalid value '{{ azure_loadbalancer_sku }}'. Supported values are 'basic', 'standard'"
   when: azure_loadbalancer_sku not in ["basic", "standard"]
 
-- name: "check azure_exclude_master_from_standard_lb is a bool"
+- name: "Check azure_exclude_master_from_standard_lb is a bool"
   assert:
     that: azure_exclude_master_from_standard_lb | type_debug == 'bool'
 
-- name: "check azure_disable_outbound_snat is a bool"
+- name: "Check azure_disable_outbound_snat is a bool"
   assert:
     that: azure_disable_outbound_snat | type_debug == 'bool'
 
-- name: "check azure_use_instance_metadata is a bool"
+- name: "Check azure_use_instance_metadata is a bool"
   assert:
     that: azure_use_instance_metadata | type_debug == 'bool'
 
-- name: check azure_vmtype value
+- name: Check azure_vmtype value
   fail:
     msg: "azure_vmtype is missing. Supported values are 'standard' or 'vmss'"
   when: azure_vmtype is not defined or not azure_vmtype
 
-- name: check azure_cloud value
+- name: Check azure_cloud value
   fail:
     msg: "azure_cloud has an invalid value '{{ azure_cloud }}'. Supported values are 'AzureChinaCloud', 'AzureGermanCloud', 'AzurePublicCloud', 'AzureUSGovernmentCloud'."
   when: azure_cloud not in ["AzureChinaCloud", "AzureGermanCloud", "AzurePublicCloud", "AzureUSGovernmentCloud"]
diff --git a/roles/kubernetes/node/tasks/cloud-credentials/openstack-credential-check.yml b/roles/kubernetes/node/tasks/cloud-credentials/openstack-credential-check.yml
index 6ff17325f..7354d43af 100644
--- a/roles/kubernetes/node/tasks/cloud-credentials/openstack-credential-check.yml
+++ b/roles/kubernetes/node/tasks/cloud-credentials/openstack-credential-check.yml
@@ -1,32 +1,32 @@
 ---
-- name: check openstack_auth_url value
+- name: Check openstack_auth_url value
   fail:
     msg: "openstack_auth_url is missing"
   when: openstack_auth_url is not defined or not openstack_auth_url
 
-- name: check openstack_username value
+- name: Check openstack_username value
   fail:
     msg: "openstack_username is missing"
   when: openstack_username is not defined or not openstack_username
 
-- name: check openstack_password value
+- name: Check openstack_password value
   fail:
     msg: "openstack_password is missing"
   when: openstack_password is not defined or not openstack_password
 
-- name: check openstack_region value
+- name: Check openstack_region value
   fail:
     msg: "openstack_region is missing"
   when: openstack_region is not defined or not openstack_region
 
-- name: check openstack_tenant_id value
+- name: Check openstack_tenant_id value
   fail:
     msg: "one of openstack_tenant_id or openstack_trust_id must be specified"
   when:
     - openstack_tenant_id is not defined or not openstack_tenant_id
     - openstack_trust_id is not defined
 
-- name: check openstack_trust_id value
+- name: Check openstack_trust_id value
   fail:
     msg: "one of openstack_tenant_id or openstack_trust_id must be specified"
   when:
diff --git a/roles/kubernetes/node/tasks/cloud-credentials/vsphere-credential-check.yml b/roles/kubernetes/node/tasks/cloud-credentials/vsphere-credential-check.yml
index 873eb71c3..b18583af0 100644
--- a/roles/kubernetes/node/tasks/cloud-credentials/vsphere-credential-check.yml
+++ b/roles/kubernetes/node/tasks/cloud-credentials/vsphere-credential-check.yml
@@ -1,5 +1,5 @@
 ---
-- name: check vsphere environment variables
+- name: Check vsphere environment variables
   fail:
     msg: "{{ item.name }} is missing"
   when: item.value is not defined or not item.value
diff --git a/roles/kubernetes/node/tasks/facts.yml b/roles/kubernetes/node/tasks/facts.yml
index 43af5cceb..754752d35 100644
--- a/roles/kubernetes/node/tasks/facts.yml
+++ b/roles/kubernetes/node/tasks/facts.yml
@@ -1,6 +1,7 @@
 ---
-- block:
-  - name: look up docker cgroup driver
+- name: Gather cgroups facts for docker
+  block:
+  - name: Look up docker cgroup driver
     shell: "set -o pipefail && docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'"
     args:
       executable: /bin/bash
@@ -8,47 +9,48 @@
     changed_when: false
     check_mode: no
 
-  - name: set kubelet_cgroup_driver_detected fact for docker
+  - name: Set kubelet_cgroup_driver_detected fact for docker
     set_fact:
       kubelet_cgroup_driver_detected: "{{ docker_cgroup_driver_result.stdout }}"
   when: container_manager == 'docker'
 
-- block:
-  - name: look up crio cgroup driver
+- name: Gather cgroups facts for crio
+  block:
+  - name: Look up crio cgroup driver
     shell: "set -o pipefail && {{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'"
     args:
       executable: /bin/bash
     register: crio_cgroup_driver_result
     changed_when: false
 
-  - name: set kubelet_cgroup_driver_detected fact for crio
+  - name: Set kubelet_cgroup_driver_detected fact for crio
     set_fact:
       kubelet_cgroup_driver_detected: "{{ crio_cgroup_driver_result.stdout }}"
   when: container_manager == 'crio'
 
-- name: set kubelet_cgroup_driver_detected fact for containerd
+- name: Set kubelet_cgroup_driver_detected fact for containerd
   set_fact:
     kubelet_cgroup_driver_detected: >-
       {%- if containerd_use_systemd_cgroup -%}systemd{%- else -%}cgroupfs{%- endif -%}
   when: container_manager == 'containerd'
 
-- name: set kubelet_cgroup_driver
+- name: Set kubelet_cgroup_driver
   set_fact:
     kubelet_cgroup_driver: "{{ kubelet_cgroup_driver_detected }}"
   when: kubelet_cgroup_driver is undefined
 
-- name: set kubelet_cgroups options when cgroupfs is used
+- name: Set kubelet_cgroups options when cgroupfs is used
   set_fact:
     kubelet_runtime_cgroups: "{{ kubelet_runtime_cgroups_cgroupfs }}"
     kubelet_kubelet_cgroups: "{{ kubelet_kubelet_cgroups_cgroupfs }}"
   when: kubelet_cgroup_driver == 'cgroupfs'
 
-- name: set kubelet_config_extra_args options when cgroupfs is used
+- name: Set kubelet_config_extra_args options when cgroupfs is used
   set_fact:
     kubelet_config_extra_args: "{{ kubelet_config_extra_args | combine(kubelet_config_extra_args_cgroupfs) }}"
   when: kubelet_cgroup_driver == 'cgroupfs'
 
-- name: os specific vars
+- name: OS specific vars
   include_vars: "{{ item }}"
   with_first_found:
   - files:
diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml
index 524353839..fb1e8adc5 100644
--- a/roles/kubernetes/node/tasks/install.yml
+++ b/roles/kubernetes/node/tasks/install.yml
@@ -1,5 +1,5 @@
 ---
-- name: install | Copy kubeadm binary from download dir
+- name: Install | Copy kubeadm binary from download dir
   copy:
     src: "{{ downloads.kubeadm.dest }}"
     dest: "{{ bin_dir }}/kubeadm"
@@ -10,7 +10,7 @@
   when:
     - not inventory_hostname in groups['kube_control_plane']
 
-- name: install | Copy kubelet binary from download dir
+- name: Install | Copy kubelet binary from download dir
   copy:
     src: "{{ downloads.kubelet.dest }}"
     dest: "{{ bin_dir }}/kubelet"
diff --git a/roles/kubernetes/node/tasks/kubelet.yml b/roles/kubernetes/node/tasks/kubelet.yml
index be429dcfb..ee01d06cf 100644
--- a/roles/kubernetes/node/tasks/kubelet.yml
+++ b/roles/kubernetes/node/tasks/kubelet.yml
@@ -39,7 +39,7 @@
     - kubelet
     - kubeadm
 
-- name: flush_handlers and reload-systemd
+- name: Flush handlers and reload systemd
   meta: flush_handlers
 
 - name: Enable kubelet
diff --git a/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml b/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml
index c8e010817..7e5cfcedd 100644
--- a/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml
+++ b/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml
@@ -1,17 +1,17 @@
 ---
-- name: haproxy | Cleanup potentially deployed nginx-proxy
+- name: Haproxy | Cleanup potentially deployed nginx-proxy
   file:
     path: "{{ kube_manifest_dir }}/nginx-proxy.yml"
     state: absent
 
-- name: haproxy | Make haproxy directory
+- name: Haproxy | Make haproxy directory
   file:
     path: "{{ haproxy_config_dir }}"
     state: directory
     mode: 0755
     owner: root
 
-- name: haproxy | Write haproxy configuration
+- name: Haproxy | Write haproxy configuration
   template:
     src: "loadbalancer/haproxy.cfg.j2"
     dest: "{{ haproxy_config_dir }}/haproxy.cfg"
@@ -19,7 +19,7 @@
     mode: 0755
     backup: yes
 
-- name: haproxy | Get checksum from config
+- name: Haproxy | Get checksum from config
   stat:
     path: "{{ haproxy_config_dir }}/haproxy.cfg"
     get_attributes: no
@@ -27,7 +27,7 @@
     get_mime: no
   register: haproxy_stat
 
-- name: haproxy | Write static pod
+- name: Haproxy | Write static pod
   template:
     src: manifests/haproxy.manifest.j2
     dest: "{{ kube_manifest_dir }}/haproxy.yml"
diff --git a/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml b/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml
index e12bd9bfc..f7b04a624 100644
--- a/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml
+++ b/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml
@@ -1,12 +1,12 @@
 ---
-- name: kube-vip  | Check cluster settings for kube-vip
+- name: Kube-vip | Check cluster settings for kube-vip
   fail:
     msg: "kube-vip require kube_proxy_strict_arp = true, see https://github.com/kube-vip/kube-vip/blob/main/docs/kubernetes/arp/index.md"
   when:
     - kube_proxy_mode == 'ipvs' and not kube_proxy_strict_arp
     - kube_vip_arp_enabled
 
-- name: kube-vip | Write static pod
+- name: Kube-vip | Write static pod
   template:
     src: manifests/kube-vip.manifest.j2
     dest: "{{ kube_manifest_dir }}/kube-vip.yml"
diff --git a/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml b/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml
index e176cb976..5b82ff620 100644
--- a/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml
+++ b/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml
@@ -1,17 +1,17 @@
 ---
-- name: haproxy | Cleanup potentially deployed haproxy
+- name: Haproxy | Cleanup potentially deployed haproxy
   file:
     path: "{{ kube_manifest_dir }}/haproxy.yml"
     state: absent
 
-- name: nginx-proxy | Make nginx directory
+- name: Nginx-proxy | Make nginx directory
   file:
     path: "{{ nginx_config_dir }}"
     state: directory
     mode: 0700
     owner: root
 
-- name: nginx-proxy | Write nginx-proxy configuration
+- name: Nginx-proxy | Write nginx-proxy configuration
   template:
     src: "loadbalancer/nginx.conf.j2"
     dest: "{{ nginx_config_dir }}/nginx.conf"
@@ -19,7 +19,7 @@
     mode: 0755
     backup: yes
 
-- name: nginx-proxy | Get checksum from config
+- name: Nginx-proxy | Get checksum from config
   stat:
     path: "{{ nginx_config_dir }}/nginx.conf"
     get_attributes: no
@@ -27,7 +27,7 @@
     get_mime: no
   register: nginx_stat
 
-- name: nginx-proxy | Write static pod
+- name: Nginx-proxy | Write static pod
   template:
     src: manifests/nginx-proxy.manifest.j2
     dest: "{{ kube_manifest_dir }}/nginx-proxy.yml"
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index e79ca5c4d..1b822cf2f 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -1,9 +1,11 @@
 ---
-- import_tasks: facts.yml
+- name: Fetch facts
+  import_tasks: facts.yml
   tags:
     - facts
 
-- import_tasks: pre_upgrade.yml
+- name: Pre-upgrade kubelet
+  import_tasks: pre_upgrade.yml
   tags:
     - kubelet
 
@@ -13,18 +15,21 @@
     state: directory
     mode: 0755
 
-- import_tasks: install.yml
+- name: Install kubelet and kubeadm binaries
+  import_tasks: install.yml
   tags:
     - kubelet
 
-- import_tasks: loadbalancer/kube-vip.yml
+- name: Install kube-vip
+  import_tasks: loadbalancer/kube-vip.yml
   when:
     - is_kube_master
     - kube_vip_enabled
   tags:
     - kube-vip
 
-- import_tasks: loadbalancer/nginx-proxy.yml
+- name: Install nginx-proxy
+  import_tasks: loadbalancer/nginx-proxy.yml
   when:
     - not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
     - loadbalancer_apiserver_localhost
@@ -32,7 +37,8 @@
   tags:
     - nginx
 
-- import_tasks: loadbalancer/haproxy.yml
+- name: Install haproxy
+  import_tasks: loadbalancer/haproxy.yml
   when:
     - not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
     - loadbalancer_apiserver_localhost
@@ -141,7 +147,8 @@
   tags:
     - kube-proxy
 
-- include_tasks: "cloud-credentials/{{ cloud_provider }}-credential-check.yml"
+- name: Check cloud provider credentials
+  include_tasks: "cloud-credentials/{{ cloud_provider }}-credential-check.yml"
   when:
     - cloud_provider is defined
     - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
@@ -187,7 +194,8 @@
   tags:
     - cloud-provider
 
-- import_tasks: kubelet.yml
+- name: Install kubelet
+  import_tasks: kubelet.yml
   tags:
     - kubelet
     - kubeadm
diff --git a/roles/kubernetes/preinstall/tasks/0010-swapoff.yml b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
index ce574f86c..c603f37b1 100644
--- a/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
+++ b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
@@ -9,7 +9,7 @@
     - none
 
 # kubelet fails even if ansible_swaptotal_mb = 0
-- name: check swap
+- name: Check swap
   command: /sbin/swapon -s
   register: swapon
   changed_when: no
diff --git a/roles/kubernetes/preinstall/tasks/0020-set_facts.yml b/roles/kubernetes/preinstall/tasks/0020-set_facts.yml
index 8d4c9ac31..8e05ac29f 100644
--- a/roles/kubernetes/preinstall/tasks/0020-set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/0020-set_facts.yml
@@ -21,7 +21,7 @@
   tags:
     - facts
 
-- name: check if booted with ostree
+- name: Check if booted with ostree
   stat:
     path: /run/ostree-booted
     get_attributes: no
@@ -29,7 +29,7 @@
     get_mime: no
   register: ostree
 
-- name: set is_fedora_coreos
+- name: Set is_fedora_coreos
   lineinfile:
     path: /etc/os-release
     line: "VARIANT_ID=coreos"
@@ -38,18 +38,18 @@
   register: os_variant_coreos
   changed_when: false
 
-- name: set is_fedora_coreos
+- name: Set is_fedora_coreos
   set_fact:
     is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}"
 
-- name: check resolvconf
+- name: Check resolvconf
   command: which resolvconf
   register: resolvconf
   failed_when: false
   changed_when: false
   check_mode: no
 
-- name: check existence of /etc/resolvconf/resolv.conf.d
+- name: Check existence of /etc/resolvconf/resolv.conf.d
   stat:
     path: /etc/resolvconf/resolv.conf.d
     get_attributes: no
@@ -58,7 +58,7 @@
   failed_when: false
   register: resolvconfd_path
 
-- name: check status of /etc/resolv.conf
+- name: Check status of /etc/resolv.conf
   stat:
     path: /etc/resolv.conf
     follow: no
@@ -68,14 +68,15 @@
   failed_when: false
   register: resolvconf_stat
 
-- block:
+- name: Fetch resolvconf
+  block:
 
-    - name: get content of /etc/resolv.conf
+    - name: Get content of /etc/resolv.conf
       slurp:
         src: /etc/resolv.conf
       register: resolvconf_slurp
 
-    - name: get currently configured nameservers
+    - name: Get currently configured nameservers
       set_fact:
         configured_nameservers: "{{ resolvconf_slurp.content | b64decode | regex_findall('^nameserver\\s*(.*)', multiline=True) | ipaddr }}"
       when: resolvconf_slurp.content is defined
@@ -100,7 +101,7 @@
   changed_when: false
   check_mode: false
 
-- name: check systemd-resolved
+- name: Check systemd-resolved
   # noqa command-instead-of-module - Should we use service_facts for this?
   command: systemctl is-active systemd-resolved
   register: systemd_resolved_enabled
@@ -108,12 +109,12 @@
   changed_when: false
   check_mode: no
 
-- name: set default dns if remove_default_searchdomains is false
+- name: Set default dns if remove_default_searchdomains is false
   set_fact:
     default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"]
   when: not remove_default_searchdomains | default() | bool or (remove_default_searchdomains | default() | bool and searchdomains | default([]) | length==0)
 
-- name: set dns facts
+- name: Set dns facts
   set_fact:
     resolvconf: >-
       {%- if resolvconf.rc == 0 and resolvconfd_path.stat.isdir is defined and resolvconfd_path.stat.isdir -%}true{%- else -%}false{%- endif -%}
@@ -125,7 +126,7 @@
                         ['169.254.169.253'] if cloud_provider is defined and cloud_provider == 'aws' else
                         [] }}"
 
-- name: check if kubelet is configured
+- name: Check if kubelet is configured
   stat:
     path: "{{ kube_config_dir }}/kubelet.env"
     get_attributes: no
@@ -134,11 +135,11 @@
   register: kubelet_configured
   changed_when: false
 
-- name: check if early DNS configuration stage
+- name: Check if early DNS configuration stage
   set_fact:
     dns_early: "{{ not kubelet_configured.stat.exists }}"
 
-- name: target resolv.conf files
+- name: Target resolv.conf files
   set_fact:
     resolvconffile: /etc/resolv.conf
     base: >-
@@ -147,12 +148,12 @@
       {%- if resolvconf | bool -%}/etc/resolvconf/resolv.conf.d/head{%- endif -%}
   when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and not is_fedora_coreos
 
-- name: target temporary resolvconf cloud init file (Flatcar Container Linux by Kinvolk / Fedora CoreOS)
+- name: Target temporary resolvconf cloud init file (Flatcar Container Linux by Kinvolk / Fedora CoreOS)
   set_fact:
     resolvconffile: /tmp/resolveconf_cloud_init_conf
   when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] or is_fedora_coreos
 
-- name: check if /etc/dhclient.conf exists
+- name: Check if /etc/dhclient.conf exists
   stat:
     path: /etc/dhclient.conf
     get_attributes: no
@@ -160,12 +161,12 @@
     get_mime: no
   register: dhclient_stat
 
-- name: target dhclient conf file for /etc/dhclient.conf
+- name: Target dhclient conf file for /etc/dhclient.conf
   set_fact:
     dhclientconffile: /etc/dhclient.conf
   when: dhclient_stat.stat.exists
 
-- name: check if /etc/dhcp/dhclient.conf exists
+- name: Check if /etc/dhcp/dhclient.conf exists
   stat:
     path: /etc/dhcp/dhclient.conf
     get_attributes: no
@@ -173,22 +174,22 @@
     get_mime: no
   register: dhcp_dhclient_stat
 
-- name: target dhclient conf file for /etc/dhcp/dhclient.conf
+- name: Target dhclient conf file for /etc/dhcp/dhclient.conf
   set_fact:
     dhclientconffile: /etc/dhcp/dhclient.conf
   when: dhcp_dhclient_stat.stat.exists
 
-- name: target dhclient hook file for Red Hat family
+- name: Target dhclient hook file for Red Hat family
   set_fact:
     dhclienthookfile: /etc/dhcp/dhclient.d/zdnsupdate.sh
   when: ansible_os_family == "RedHat"
 
-- name: target dhclient hook file for Debian family
+- name: Target dhclient hook file for Debian family
   set_fact:
     dhclienthookfile: /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate
   when: ansible_os_family == "Debian"
 
-- name: generate search domains to resolvconf
+- name: Generate search domains for resolvconf
   set_fact:
     searchentries:
       search {{ (default_searchdomains | default([]) + searchdomains | default([])) | join(' ') }}
@@ -199,7 +200,7 @@
     supersede_domain:
       supersede domain-name "{{ dns_domain }}";
 
-- name: pick coredns cluster IP or default resolver
+- name: Pick coredns cluster IP or default resolver
   set_fact:
     coredns_server: |-
       {%- if dns_mode == 'coredns' and not dns_early | bool -%}
@@ -215,7 +216,7 @@
       {%- endif -%}
 
 # This task should only run after cluster/nodelocal DNS is up, otherwise all DNS lookups will timeout
-- name: generate nameservers for resolvconf, including cluster DNS
+- name: Generate nameservers for resolvconf, including cluster DNS
   set_fact:
     nameserverentries: |-
       {{ (([nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server | d([]) if not enable_nodelocaldns else []) + nameservers | d([]) + cloud_resolver | d([]) + (configured_nameservers | d([]) if not disable_host_nameservers | d() | bool else [])) | unique | join(',') }}
@@ -225,7 +226,7 @@
 
 # This task should run instead of the above task when cluster/nodelocal DNS hasn't
 # been deployed yet (like scale.yml/cluster.yml) or when it's down (reset.yml)
-- name: generate nameservers for resolvconf, not including cluster DNS
+- name: Generate nameservers for resolvconf, not including cluster DNS
   set_fact:
     nameserverentries: |-
       {{ (nameservers | d([]) + cloud_resolver | d([]) + configured_nameservers | d([])) | unique | join(',') }}
@@ -233,7 +234,7 @@
       supersede domain-name-servers {{ (nameservers | d([]) + cloud_resolver | d([])) | unique | join(', ') }};
   when: dns_early and not dns_late
 
-- name: gather os specific variables
+- name: Gather os specific variables
   include_vars: "{{ item }}"
   with_first_found:
     - files:
@@ -247,7 +248,7 @@
         - ../vars
       skip: true
 
-- name: set etcd vars if using kubeadm mode
+- name: Set etcd vars if using kubeadm mode
   set_fact:
     etcd_cert_dir: "{{ kube_cert_dir }}"
     kube_etcd_cacert_file: "etcd/ca.crt"
@@ -256,7 +257,7 @@
   when:
     - etcd_deployment_type == "kubeadm"
 
-- name: check /usr readonly
+- name: Check /usr readonly
   stat:
     path: "/usr"
     get_attributes: no
@@ -264,7 +265,7 @@
     get_mime: no
   register: usr
 
-- name: set alternate flexvolume path
+- name: Set alternate flexvolume path
   set_fact:
     kubelet_flexvolumes_plugins_dir: /var/lib/kubelet/volumeplugins
   when: not usr.stat.writeable
diff --git a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
index 3b4ec4bd7..9453d73c8 100644
--- a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
@@ -152,7 +152,7 @@
     msg: "Hostname must consist of lower case alphanumeric characters, '.' or '-', and must start and end with an alphanumeric character"
   when: not ignore_assert_errors
 
-- name: check cloud_provider value
+- name: Check cloud_provider value
   assert:
     that: cloud_provider in ['gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', 'external']
     msg: "If set the 'cloud_provider' var must be set either to 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci' or 'external'"
diff --git a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
index 884ffbb49..da5fc8516 100644
--- a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
+++ b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
@@ -1,5 +1,5 @@
 ---
-- name: create temporary resolveconf cloud init file
+- name: Create temporary resolveconf cloud init file
   command: cp -f /etc/resolv.conf "{{ resolvconffile }}"
   when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
 
@@ -43,12 +43,12 @@
     - [ 'search\s', 'nameserver\s', 'domain\s', 'options\s' ]
   notify: Preinstall | propagate resolvconf to k8s components
 
-- name: get temporary resolveconf cloud init file content
+- name: Get temporary resolveconf cloud init file content
   command: cat {{ resolvconffile }}
   register: cloud_config
   when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
 
-- name: persist resolvconf cloud init file
+- name: Persist resolvconf cloud init file
   template:
     dest: "{{ resolveconf_cloud_init_conf }}"
     src: resolvconf.j2
diff --git a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml
index ae5e68914..b80fe8f09 100644
--- a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml
+++ b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml
@@ -9,7 +9,7 @@
     backup: yes
   notify: Preinstall | update resolvconf for networkmanager
 
-- name: set default dns if remove_default_searchdomains is false
+- name: Set default dns if remove_default_searchdomains is false
   set_fact:
     default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"]
   when: not remove_default_searchdomains | default() | bool or (remove_default_searchdomains | default() | bool and searchdomains | default([]) | length==0)
diff --git a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
index eb81d7d8b..50b3808c7 100644
--- a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
+++ b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
@@ -9,7 +9,8 @@
     - ansible_pkg_mgr == 'zypper'
   tags: bootstrap-os
 
-- block:
+- name: Add Debian 10 required repos
+  block:
     - name: Add Debian Backports apt repo
       apt_repository:
         repo: "deb http://deb.debian.org/debian {{ ansible_distribution_release }}-backports main"
diff --git a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
index d4b7957f9..621629f6a 100644
--- a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
+++ b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
@@ -2,7 +2,7 @@
 
 # Running growpart seems to be only required on Azure, as other Cloud Providers do this at boot time
 
-- name: install growpart
+- name: Install growpart
   package:
     name: cloud-utils-growpart
     state: present
@@ -20,7 +20,7 @@
     partition: "{{ _root_device | first | regex_replace('[^0-9]+([0-9]+)', '\\1') }}"
     root_device: "{{ _root_device }}"
 
-- name: check if growpart needs to be run
+- name: Check if growpart needs to be run
   command: growpart -N {{ device }} {{ partition }}
   failed_when: False
   changed_when: "'NOCHANGE:' not in growpart_needed.stdout"
@@ -28,17 +28,17 @@
   environment:
     LC_ALL: C
 
-- name: check fs type
+- name: Check fs type
   command: file -Ls {{ root_device }}
   changed_when: False
   register: fs_type
 
-- name: run growpart  # noqa no-handler
+- name: Run growpart  # noqa no-handler
   command: growpart {{ device }} {{ partition }}
   when: growpart_needed.changed
   environment:
     LC_ALL: C
 
-- name: run xfs_growfs  # noqa no-handler
+- name: Run xfs_growfs  # noqa no-handler
   command: xfs_growfs {{ root_device }}
   when: growpart_needed.changed and 'XFS' in fs_type.stdout
diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml
index 133c45d9b..ee4de5d50 100644
--- a/roles/kubernetes/preinstall/tasks/main.yml
+++ b/roles/kubernetes/preinstall/tasks/main.yml
@@ -1,26 +1,31 @@
 ---
 # Disable swap
-- import_tasks: 0010-swapoff.yml
+- name: Disable swap
+  import_tasks: 0010-swapoff.yml
   when:
     - not dns_late
     - kubelet_fail_swap_on
 
-- import_tasks: 0020-set_facts.yml
+- name: Set facts
+  import_tasks: 0020-set_facts.yml
   tags:
     - resolvconf
     - facts
 
-- import_tasks: 0040-verify-settings.yml
+- name: Check settings
+  import_tasks: 0040-verify-settings.yml
   when:
     - not dns_late
   tags:
     - asserts
 
-- import_tasks: 0050-create_directories.yml
+- name: Create directories
+  import_tasks: 0050-create_directories.yml
   when:
     - not dns_late
 
-- import_tasks: 0060-resolvconf.yml
+- name: Apply resolvconf settings
+  import_tasks: 0060-resolvconf.yml
   when:
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
@@ -30,7 +35,8 @@
     - bootstrap-os
     - resolvconf
 
-- import_tasks: 0061-systemd-resolved.yml
+- name: Apply systemd-resolved settings
+  import_tasks: 0061-systemd-resolved.yml
   when:
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
@@ -39,13 +45,15 @@
     - bootstrap-os
     - resolvconf
 
-- import_tasks: 0062-networkmanager-unmanaged-devices.yml
+- name: Apply networkmanager unmanaged devices settings
+  import_tasks: 0062-networkmanager-unmanaged-devices.yml
   when:
     - networkmanager_enabled.rc == 0
   tags:
     - bootstrap-os
 
-- import_tasks: 0063-networkmanager-dns.yml
+- name: Apply networkmanager DNS settings
+  import_tasks: 0063-networkmanager-dns.yml
   when:
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
@@ -54,31 +62,36 @@
     - bootstrap-os
     - resolvconf
 
-- import_tasks: 0070-system-packages.yml
+- name: Install required system packages
+  import_tasks: 0070-system-packages.yml
   when:
     - not dns_late
   tags:
     - bootstrap-os
 
-- import_tasks: 0080-system-configurations.yml
+- name: Apply system configurations
+  import_tasks: 0080-system-configurations.yml
   when:
     - not dns_late
   tags:
     - bootstrap-os
 
-- import_tasks: 0081-ntp-configurations.yml
+- name: Configure NTP
+  import_tasks: 0081-ntp-configurations.yml
   when:
     - not dns_late
     - ntp_enabled
   tags:
     - bootstrap-os
 
-- import_tasks: 0090-etchosts.yml
+- name: Configure /etc/hosts
+  import_tasks: 0090-etchosts.yml
   tags:
     - bootstrap-os
     - etchosts
 
-- import_tasks: 0100-dhclient-hooks.yml
+- name: Configure dhclient
+  import_tasks: 0100-dhclient-hooks.yml
   when:
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
@@ -88,7 +101,8 @@
     - bootstrap-os
     - resolvconf
 
-- import_tasks: 0110-dhclient-hooks-undo.yml
+- name: Undo dhclient hooks
+  import_tasks: 0110-dhclient-hooks-undo.yml
   when:
     - dns_mode != 'none'
     - resolvconf_mode != 'host_resolvconf'
@@ -115,7 +129,8 @@
   tags:
     - bootstrap-os
 
-- import_tasks: 0120-growpart-azure-centos-7.yml
+- name: Grow partition on Azure CentOS 7
+  import_tasks: 0120-growpart-azure-centos-7.yml
   when:
     - not dns_late
     - azure_check.stat.exists
diff --git a/roles/kubernetes/tokens/tasks/main.yml b/roles/kubernetes/tokens/tasks/main.yml
index d454a80cf..c9dfd071d 100644
--- a/roles/kubernetes/tokens/tasks/main.yml
+++ b/roles/kubernetes/tokens/tasks/main.yml
@@ -1,6 +1,7 @@
 ---
 
-- import_tasks: check-tokens.yml
+- name: Check tokens
+  import_tasks: check-tokens.yml
   tags:
     - k8s-secrets
     - k8s-gen-tokens
@@ -13,7 +14,8 @@
     mode: 0644
     group: "{{ kube_cert_group }}"
 
-- import_tasks: gen_tokens.yml
+- name: Generate tokens
+  import_tasks: gen_tokens.yml
   tags:
     - k8s-secrets
     - k8s-gen-tokens
diff --git a/roles/kubespray-defaults/tasks/fallback_ips.yml b/roles/kubespray-defaults/tasks/fallback_ips.yml
index 86b0bd7f9..9aa0ea223 100644
--- a/roles/kubespray-defaults/tasks/fallback_ips.yml
+++ b/roles/kubespray-defaults/tasks/fallback_ips.yml
@@ -14,7 +14,7 @@
   run_once: yes
   tags: always
 
-- name: create fallback_ips_base
+- name: Create fallback_ips_base
   set_fact:
     fallback_ips_base: |
       ---
@@ -28,6 +28,6 @@
   become: no
   run_once: yes
 
-- name: set fallback_ips
+- name: Set fallback_ips
   set_fact:
     fallback_ips: "{{ hostvars.localhost.fallback_ips_base | from_yaml }}"
diff --git a/roles/kubespray-defaults/tasks/main.yaml b/roles/kubespray-defaults/tasks/main.yaml
index 648a4af6e..ebd9b896b 100644
--- a/roles/kubespray-defaults/tasks/main.yaml
+++ b/roles/kubespray-defaults/tasks/main.yaml
@@ -6,7 +6,7 @@
     - always
 
 # do not run gather facts when bootstrap-os in roles
-- name: set fallback_ips
+- name: Set fallback_ips
   import_tasks: fallback_ips.yml
   when:
     - "'bootstrap-os' not in ansible_play_role_names"
@@ -14,7 +14,7 @@
   tags:
     - always
 
-- name: set no_proxy
+- name: Set no_proxy
   import_tasks: no_proxy.yml
   when:
     - "'bootstrap-os' not in ansible_play_role_names"
diff --git a/roles/network_plugin/calico/handlers/main.yml b/roles/network_plugin/calico/handlers/main.yml
index fbcae3a24..7f998dba3 100644
--- a/roles/network_plugin/calico/handlers/main.yml
+++ b/roles/network_plugin/calico/handlers/main.yml
@@ -1,13 +1,13 @@
 ---
-- name: reset_calico_cni
+- name: Reset_calico_cni
   command: /bin/true
   when: calico_cni_config is defined
   notify:
-    - delete 10-calico.conflist
+    - Delete 10-calico.conflist
     - Calico | delete calico-node docker containers
     - Calico | delete calico-node crio/containerd containers
 
-- name: delete 10-calico.conflist
+- name: Delete 10-calico.conflist
   file:
     path: /etc/cni/net.d/10-calico.conflist
     state: absent
diff --git a/roles/network_plugin/calico/rr/tasks/update-node.yml b/roles/network_plugin/calico/rr/tasks/update-node.yml
index 59841148c..fc873ba13 100644
--- a/roles/network_plugin/calico/rr/tasks/update-node.yml
+++ b/roles/network_plugin/calico/rr/tasks/update-node.yml
@@ -1,7 +1,8 @@
 ---
 # Workaround to retry a block of tasks, ansible doesn't have a direct way to do it,
 # you can follow the block loop request in: https://github.com/ansible/ansible/issues/46203
-- block:
+- name: Calico-rr | Configure route reflector
+  block:
   - name: Set the retry count
     set_fact:
       retry_count: "{{ 0 if retry_count is undefined else retry_count | int + 1 }}"
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml
index 6dbcc3170..676a154c8 100644
--- a/roles/network_plugin/calico/tasks/install.yml
+++ b/roles/network_plugin/calico/tasks/install.yml
@@ -119,7 +119,8 @@
     - calico_pool_cidr_ipv6 is defined
     - enable_dual_stack_networks
 
-- block:
+- name: Calico | kdd specific configuration
+  block:
     - name: Calico | Check if extra directory is needed
       stat:
         path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ 'kdd' if (calico_version is version('v3.22.3', '<')) else 'crd' }}"
@@ -157,7 +158,8 @@
     - inventory_hostname in groups['kube_control_plane']
     - calico_datastore == "kdd"
 
-- block:
+- name: Calico | Configure Felix
+  block:
     - name: Calico | Get existing FelixConfiguration
       command: "{{ bin_dir }}/calicoctl.sh get felixconfig default -o json"
       register: _felix_cmd
@@ -201,7 +203,8 @@
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
 
-- block:
+- name: Calico | Configure Calico IP Pool
+  block:
     - name: Calico | Get existing calico network pool
       command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }} -o json"
       register: _calico_pool_cmd
@@ -240,7 +243,8 @@
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
 
-- block:
+- name: Calico | Configure Calico IPv6 Pool
+  block:
     - name: Calico | Get existing calico ipv6 network pool
       command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json"
       register: _calico_pool_ipv6_cmd
@@ -300,7 +304,8 @@
     - inventory_hostname in groups['k8s_cluster']
   run_once: yes
 
-- block:
+- name: Calico | Configure Calico BGP
+  block:
     - name: Calico | Get existing BGP Configuration
       command: "{{ bin_dir }}/calicoctl.sh get bgpconfig default -o json"
       register: _bgp_config_cmd
@@ -463,10 +468,12 @@
     - inventory_hostname == groups['kube_control_plane'][0]
     - calico_datastore == "kdd"
 
-- include_tasks: peer_with_calico_rr.yml
+- name: Calico | Peer with Calico Route Reflector
+  include_tasks: peer_with_calico_rr.yml
   when:
     - peer_with_calico_rr | default(false)
 
-- include_tasks: peer_with_router.yml
+- name: Calico | Peer with the router
+  include_tasks: peer_with_router.yml
   when:
     - peer_with_router | default(false)
diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml
index 81844fa4f..5921a91f3 100644
--- a/roles/network_plugin/calico/tasks/main.yml
+++ b/roles/network_plugin/calico/tasks/main.yml
@@ -1,6 +1,9 @@
 ---
-- import_tasks: pre.yml
+- name: Calico Pre tasks
+  import_tasks: pre.yml
 
-- import_tasks: repos.yml
+- name: Calico repos
+  import_tasks: repos.yml
 
-- include_tasks: install.yml
+- name: Calico install
+  include_tasks: install.yml
diff --git a/roles/network_plugin/calico/tasks/pre.yml b/roles/network_plugin/calico/tasks/pre.yml
index fc87769f0..c1e9f2dcd 100644
--- a/roles/network_plugin/calico/tasks/pre.yml
+++ b/roles/network_plugin/calico/tasks/pre.yml
@@ -5,7 +5,8 @@
   register: calico_cni_config_slurp
   failed_when: false
 
-- block:
+- name: Gather calico facts
+  block:
   - name: Set fact calico_cni_config from slurped CNI config
     set_fact:
       calico_cni_config: "{{ calico_cni_config_slurp['content'] | b64decode | from_json }}"
diff --git a/roles/network_plugin/calico/tasks/reset.yml b/roles/network_plugin/calico/tasks/reset.yml
index 48d2e5a00..8dab21462 100644
--- a/roles/network_plugin/calico/tasks/reset.yml
+++ b/roles/network_plugin/calico/tasks/reset.yml
@@ -1,5 +1,5 @@
 ---
-- name: reset | check vxlan.calico network device
+- name: Reset | check vxlan.calico network device
   stat:
     path: /sys/class/net/vxlan.calico
     get_attributes: no
@@ -7,11 +7,11 @@
     get_mime: no
   register: vxlan
 
-- name: reset | remove the network vxlan.calico device created by calico
+- name: Reset | remove the network vxlan.calico device created by calico
   command: ip link del vxlan.calico
   when: vxlan.stat.exists
 
-- name: reset | check dummy0 network device
+- name: Reset | check dummy0 network device
   stat:
     path: /sys/class/net/dummy0
     get_attributes: no
@@ -19,11 +19,11 @@
     get_mime: no
   register: dummy0
 
-- name: reset | remove the network device created by calico
+- name: Reset | remove the network device created by calico
   command: ip link del dummy0
   when: dummy0.stat.exists
 
-- name: reset | get and remove remaining routes set by bird
+- name: Reset | get and remove remaining routes set by bird
   shell: set -o pipefail && ip route show proto bird | xargs -i bash -c "ip route del {} proto bird "
   args:
     executable: /bin/bash
diff --git a/roles/network_plugin/cilium/tasks/main.yml b/roles/network_plugin/cilium/tasks/main.yml
index 63c99dc06..8123c5a4c 100644
--- a/roles/network_plugin/cilium/tasks/main.yml
+++ b/roles/network_plugin/cilium/tasks/main.yml
@@ -1,6 +1,9 @@
 ---
-- import_tasks: check.yml
+- name: Cilium check
+  import_tasks: check.yml
 
-- include_tasks: install.yml
+- name: Cilium install
+  include_tasks: install.yml
 
-- include_tasks: apply.yml
+- name: Cilium apply
+  include_tasks: apply.yml
diff --git a/roles/network_plugin/cilium/tasks/reset.yml b/roles/network_plugin/cilium/tasks/reset.yml
index 432df8a5c..b578b074e 100644
--- a/roles/network_plugin/cilium/tasks/reset.yml
+++ b/roles/network_plugin/cilium/tasks/reset.yml
@@ -1,5 +1,5 @@
 ---
-- name: reset | check and remove devices if still present
+- name: Reset | check and remove devices if still present
   include_tasks: reset_iface.yml
   vars:
     iface: "{{ item }}"
diff --git a/roles/network_plugin/cilium/tasks/reset_iface.yml b/roles/network_plugin/cilium/tasks/reset_iface.yml
index d84a065af..e2f7c14af 100644
--- a/roles/network_plugin/cilium/tasks/reset_iface.yml
+++ b/roles/network_plugin/cilium/tasks/reset_iface.yml
@@ -1,5 +1,5 @@
 ---
-- name: "reset | check if network device {{ iface }} is present"
+- name: "Reset | check if network device {{ iface }} is present"
   stat:
     path: "/sys/class/net/{{ iface }}"
     get_attributes: no
@@ -7,6 +7,6 @@
     get_mime: no
   register: device_remains
 
-- name: "reset | remove network device {{ iface }}"
+- name: "Reset | remove network device {{ iface }}"
   command: "ip link del {{ iface }}"
   when: device_remains.stat.exists
diff --git a/roles/network_plugin/flannel/tasks/reset.yml b/roles/network_plugin/flannel/tasks/reset.yml
index 2fd86e2bd..03d40a0c1 100644
--- a/roles/network_plugin/flannel/tasks/reset.yml
+++ b/roles/network_plugin/flannel/tasks/reset.yml
@@ -1,5 +1,5 @@
 ---
-- name: reset | check cni network device
+- name: Reset | check cni network device
   stat:
     path: /sys/class/net/cni0
     get_attributes: no
@@ -7,11 +7,11 @@
     get_mime: no
   register: cni
 
-- name: reset | remove the network device created by the flannel
+- name: Reset | remove the cni0 network device created by flannel
   command: ip link del cni0
   when: cni.stat.exists
 
-- name: reset | check flannel network device
+- name: Reset | check flannel network device
   stat:
     path: /sys/class/net/flannel.1
     get_attributes: no
@@ -19,6 +19,6 @@
     get_mime: no
   register: flannel
 
-- name: reset | remove the network device created by the flannel
+- name: Reset | remove the flannel.1 network device created by flannel
   command: ip link del flannel.1
   when: flannel.stat.exists
diff --git a/roles/network_plugin/kube-router/handlers/main.yml b/roles/network_plugin/kube-router/handlers/main.yml
index c0ddb33ad..0723dfd8a 100644
--- a/roles/network_plugin/kube-router/handlers/main.yml
+++ b/roles/network_plugin/kube-router/handlers/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: reset_kube_router
+- name: Reset_kube_router
   command: /bin/true
   notify:
     - Kube-router | delete kube-router docker containers
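
Ansible resolves notify entries against handler names (or listen aliases) by exact string match, so capitalizing a handler like the one above only works if every notify pointing at it is updated to the identical string, as the later hunks in this patch do. A hypothetical standalone play illustrating the coupling:

    - name: Demonstrate notify/handler name matching
      hosts: localhost
      gather_facts: false
      tasks:
        - name: Render a marker file
          copy:
            content: "demo\n"
            dest: /tmp/kube-router-demo.conf
            mode: 0644
          notify:
            - Reset_kube_router  # must match the handler name below exactly
      handlers:
        - name: Reset_kube_router
          command: /bin/true
          changed_when: false
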
diff --git a/roles/network_plugin/kube-router/tasks/annotate.yml b/roles/network_plugin/kube-router/tasks/annotate.yml
index e91249f7d..67d57a2d3 100644
--- a/roles/network_plugin/kube-router/tasks/annotate.yml
+++ b/roles/network_plugin/kube-router/tasks/annotate.yml
@@ -1,19 +1,19 @@
 ---
-- name: kube-router | Add annotations on kube_control_plane
+- name: Kube-router | Add annotations on kube_control_plane
   command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_master }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane']
 
-- name: kube-router | Add annotations on kube_node
+- name: Kube-router | Add annotations on kube_node
   command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_node }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: kube_router_annotations_node is defined and inventory_hostname in groups['kube_node']
 
-- name: kube-router | Add common annotations on all servers
+- name: Kube-router | Add common annotations on all servers
   command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_all }}"
diff --git a/roles/network_plugin/kube-router/tasks/main.yml b/roles/network_plugin/kube-router/tasks/main.yml
index 23b3af964..27975c374 100644
--- a/roles/network_plugin/kube-router/tasks/main.yml
+++ b/roles/network_plugin/kube-router/tasks/main.yml
@@ -1,9 +1,9 @@
 ---
-- name: kube-router | Create annotations
+- name: Kube-router | Create annotations
   import_tasks: annotate.yml
   tags: annotate
 
-- name: kube-router | Create config directory
+- name: Kube-router | Create config directory
   file:
     path: /var/lib/kube-router
     state: directory
@@ -11,49 +11,49 @@
     recurse: true
     mode: 0755
 
-- name: kube-router | Create kubeconfig
+- name: Kube-router | Create kubeconfig
   template:
     src: kubeconfig.yml.j2
     dest: /var/lib/kube-router/kubeconfig
     mode: 0644
     owner: "{{ kube_owner }}"
   notify:
-    - reset_kube_router
+    - Reset_kube_router
 
-- name: kube-router | Slurp cni config
+- name: Kube-router | Slurp cni config
   slurp:
     src: /etc/cni/net.d/10-kuberouter.conflist
   register: cni_config_slurp
   ignore_errors: true  # noqa ignore-errors
 
-- name: kube-router | Set cni_config variable
+- name: Kube-router | Set cni_config variable
   set_fact:
     cni_config: "{{ cni_config_slurp.content | b64decode | from_json }}"
   when:
     - not cni_config_slurp.failed
 
-- name: kube-router | Set host_subnet variable
+- name: Kube-router | Set host_subnet variable
   set_fact:
     host_subnet: "{{ cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | first }}"
   when:
     - cni_config is defined
     - cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | length > 0
 
-- name: kube-router | Create cni config
+- name: Kube-router | Create cni config
   template:
     src: cni-conf.json.j2
     dest: /etc/cni/net.d/10-kuberouter.conflist
     mode: 0644
     owner: "{{ kube_owner }}"
   notify:
-    - reset_kube_router
+    - Reset_kube_router
 
-- name: kube-router | Delete old configuration
+- name: Kube-router | Delete old configuration
   file:
     path: /etc/cni/net.d/10-kuberouter.conf
     state: absent
 
-- name: kube-router | Create manifest
+- name: Kube-router | Create manifest
   template:
     src: kube-router.yml.j2
     dest: "{{ kube_config_dir }}/kube-router.yml"
diff --git a/roles/network_plugin/kube-router/tasks/reset.yml b/roles/network_plugin/kube-router/tasks/reset.yml
index 7b8ad2ceb..ae9ee55c7 100644
--- a/roles/network_plugin/kube-router/tasks/reset.yml
+++ b/roles/network_plugin/kube-router/tasks/reset.yml
@@ -1,5 +1,5 @@
 ---
-- name: reset | check kube-dummy-if network device
+- name: Reset | check kube-dummy-if network device
   stat:
     path: /sys/class/net/kube-dummy-if
     get_attributes: no
@@ -7,11 +7,11 @@
     get_mime: no
   register: kube_dummy_if
 
-- name: reset | remove the network device created by kube-router
+- name: Reset | remove the network device created by kube-router
   command: ip link del kube-dummy-if
   when: kube_dummy_if.stat.exists
 
-- name: check kube-bridge exists
+- name: Reset | check kube-bridge exists
   stat:
     path: /sys/class/net/kube-bridge
     get_attributes: no
@@ -19,10 +19,10 @@
     get_mime: no
   register: kube_bridge_if
 
-- name: reset | donw the network bridge create by kube-router
+- name: Reset | bring down the network bridge created by kube-router
   command: ip link set kube-bridge down
   when: kube_bridge_if.stat.exists
 
-- name: reset | remove the network bridge create by kube-router
+- name: Reset | remove the network bridge created by kube-router
   command: ip link del kube-bridge
   when: kube_bridge_if.stat.exists
diff --git a/roles/network_plugin/macvlan/tasks/main.yml b/roles/network_plugin/macvlan/tasks/main.yml
index 2b486cce2..e50a742c3 100644
--- a/roles/network_plugin/macvlan/tasks/main.yml
+++ b/roles/network_plugin/macvlan/tasks/main.yml
@@ -27,7 +27,8 @@
   notify: Macvlan | restart network
   when: ansible_os_family in ["Debian"]
 
-- block:
+- name: Install macvlan config on RH distros
+  block:
   - name: Macvlan | Install macvlan script on centos
     copy:
       src: "{{ item }}"
@@ -60,7 +61,8 @@
 
   when: ansible_os_family == "RedHat"
 
-- block:
+- name: Install macvlan config on Flatcar
+  block:
   - name: Macvlan | Install service nat via gateway on Flatcar Container Linux
     template:
       src: coreos-service-nat_ouside.j2
diff --git a/roles/network_plugin/ovn4nfv/tasks/main.yml b/roles/network_plugin/ovn4nfv/tasks/main.yml
index da212662f..777fd9a2d 100644
--- a/roles/network_plugin/ovn4nfv/tasks/main.yml
+++ b/roles/network_plugin/ovn4nfv/tasks/main.yml
@@ -1,11 +1,11 @@
 ---
-- name: ovn4nfv | Label control-plane node
+- name: Ovn4nfv | Label control-plane node
   command: >-
     {{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
 
-- name: ovn4nfv | Create ovn4nfv-k8s manifests
+- name: Ovn4nfv | Create ovn4nfv-k8s manifests
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ kube_config_dir }}/{{ item.file }}"
diff --git a/roles/recover_control_plane/etcd/tasks/main.yml b/roles/recover_control_plane/etcd/tasks/main.yml
index 0ebd624c8..3c3034492 100644
--- a/roles/recover_control_plane/etcd/tasks/main.yml
+++ b/roles/recover_control_plane/etcd/tasks/main.yml
@@ -26,7 +26,8 @@
   when:
     - inventory_hostname in groups['broken_etcd']
 
-- include_tasks: recover_lost_quorum.yml
+- name: Recover lost etcd quorum
+  include_tasks: recover_lost_quorum.yml
   when:
     - groups['broken_etcd']
     - not has_quorum
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index 61694547f..bc8bfd6d6 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: remove-node | Delete node
+- name: Remove-node | Delete node
   command: "{{ kubectl }} delete node {{ kube_override_hostname | default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube_control_plane'] | first }}"
   when:
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
index d16df1a36..6f6c31461 100644
--- a/roles/remove-node/pre-remove/tasks/main.yml
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: remove-node | List nodes
+- name: Remove-node | List nodes
   command: >-
     {{ kubectl }} get nodes -o go-template={% raw %}'{{ range .items }}{{ .metadata.name }}{{ "\n" }}{{ end }}'{% endraw %}
   register: nodes
@@ -9,7 +9,7 @@
   changed_when: false
   run_once: true
 
-- name: remove-node | Drain node except daemonsets resource
+- name: Remove-node | Drain node except daemonsets resource
   command: >-
     {{ kubectl }} drain
       --force
@@ -28,7 +28,7 @@
   retries: "{{ drain_retries }}"
   delay: "{{ drain_retry_delay_seconds }}"
 
-- name: remove-node | Wait until Volumes will be detached from the node
+- name: Remove-node | Wait until volumes are detached from the node
   command: >-
     {{ kubectl }} get volumeattachments -o go-template={% raw %}'{{ range .items }}{{ .spec.nodeName }}{{ "\n" }}{{ end }}'{% endraw %}
   register: nodes_with_volumes
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 534033d18..ae74473ea 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: reset | stop services
+- name: Reset | stop services
   service:
     name: "{{ item }}"
     state: stopped
@@ -11,7 +11,7 @@
   tags:
     - services
 
-- name: reset | remove services
+- name: Reset | remove services
   file:
     path: "/etc/systemd/system/{{ item }}"
     state: absent
@@ -30,7 +30,7 @@
     - containerd
     - crio
 
-- name: reset | Remove Docker
+- name: Reset | Remove Docker
   include_role:
     name: container-engine/docker
     tasks_from: reset
@@ -38,12 +38,12 @@
   tags:
     - docker
 
-- name: reset | systemctl daemon-reload  # noqa no-handler
+- name: Reset | systemctl daemon-reload  # noqa no-handler
   systemd:
     daemon_reload: true
   when: services_removed.changed
 
-- name: reset | check if crictl is present
+- name: Reset | check if crictl is present
   stat:
     path: "{{ bin_dir }}/crictl"
     get_attributes: no
@@ -51,7 +51,7 @@
     get_mime: no
   register: crictl
 
-- name: reset | stop all cri containers
+- name: Reset | stop all cri containers
   shell: "set -o pipefail && {{ bin_dir }}/crictl ps -q | xargs -r {{ bin_dir }}/crictl -t 60s stop"
   args:
     executable: /bin/bash
@@ -68,7 +68,7 @@
     - ansible_facts.services['containerd.service'] is defined or ansible_facts.services['cri-o.service'] is defined
   ignore_errors: true  # noqa ignore-errors
 
-- name: reset | force remove all cri containers
+- name: Reset | force remove all cri containers
   command: "{{ bin_dir }}/crictl rm -a -f"
   register: remove_all_cri_containers
   retries: 5
@@ -84,7 +84,7 @@
     - ansible_facts.services['containerd.service'] is defined or ansible_facts.services['cri-o.service'] is defined
   ignore_errors: true  # noqa ignore-errors
 
-- name: reset | stop and disable crio service
+- name: Reset | stop and disable crio service
   service:
     name: crio
     state: stopped
@@ -93,13 +93,13 @@
   tags: [ crio ]
   when: container_manager == "crio"
 
-- name: reset | forcefully wipe CRI-O's container and image storage
+- name: Reset | forcefully wipe CRI-O's container and image storage
   command: "crio wipe -f"
   failed_when: false
   tags: [ crio ]
   when: container_manager == "crio"
 
-- name: reset | stop all cri pods
+- name: Reset | stop all cri pods
   shell: "set -o pipefail && {{ bin_dir }}/crictl pods -q | xargs -r {{ bin_dir }}/crictl -t 60s stopp"
   args:
     executable: /bin/bash
@@ -114,8 +114,9 @@
     - ansible_facts.services['containerd.service'] is defined or ansible_facts.services['cri-o.service'] is defined
   ignore_errors: true  # noqa ignore-errors
 
-- block:
-    - name: reset | force remove all cri pods
+- name: Reset | force remove all cri pods
+  block:
+    - name: Reset | force remove all cri pods
       command: "{{ bin_dir }}/crictl rmp -a -f"
       register: remove_all_cri_containers
       retries: 5
@@ -128,12 +129,12 @@
         - ansible_facts.services['containerd.service'] is defined or ansible_facts.services['cri-o.service'] is defined
 
   rescue:
-    - name: reset | force remove all cri pods (rescue)
+    - name: Reset | force remove all cri pods (rescue)
       shell: "ip netns list | cut -d' ' -f 1 | xargs -n1 ip netns delete && {{ bin_dir }}/crictl rmp -a -f"
       ignore_errors: true  # noqa ignore-errors
       changed_when: true
 
-- name: reset | stop etcd services
+- name: Reset | stop etcd services
   service:
     name: "{{ item }}"
     state: stopped
@@ -144,7 +145,7 @@
   tags:
     - services
 
-- name: reset | remove etcd services
+- name: Reset | remove etcd services
   file:
     path: "/etc/systemd/system/{{ item }}.service"
     state: absent
@@ -155,10 +156,10 @@
   tags:
     - services
 
-- name: reset | remove containerd
+- name: Reset | remove containerd
   when: container_manager == 'containerd'
   block:
-    - name: reset | stop containerd service
+    - name: Reset | stop containerd service
       service:
         name: containerd
         state: stopped
@@ -166,7 +167,7 @@
       tags:
         - services
 
-    - name: reset | remove containerd service
+    - name: Reset | remove containerd service
       file:
         path: /etc/systemd/system/containerd.service
         state: absent
@@ -174,7 +175,7 @@
       tags:
         - services
 
-- name: reset | gather mounted kubelet dirs
+- name: Reset | gather mounted kubelet dirs
   shell: set -o pipefail && mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
   args:
     executable: /bin/bash
@@ -185,7 +186,7 @@
   tags:
     - mounts
 
-- name: reset | unmount kubelet dirs
+- name: Reset | unmount kubelet dirs
   command: umount -f {{ item }}
   with_items: "{{ mounted_dirs.stdout_lines }}"
   register: umount_dir
@@ -196,7 +197,7 @@
   tags:
     - mounts
 
-- name: flush iptables
+- name: Flush iptables
   iptables:
     table: "{{ item }}"
     flush: yes
@@ -209,7 +210,7 @@
   tags:
     - iptables
 
-- name: flush ip6tables
+- name: Flush ip6tables
   iptables:
     table: "{{ item }}"
     flush: yes
@@ -229,7 +230,7 @@
   when:
     - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s_cluster']
 
-- name: reset | check kube-ipvs0 network device
+- name: Reset | check kube-ipvs0 network device
   stat:
     path: /sys/class/net/kube-ipvs0
     get_attributes: no
@@ -237,13 +238,13 @@
     get_mime: no
   register: kube_ipvs0
 
-- name: reset | Remove kube-ipvs0
+- name: Reset | Remove kube-ipvs0
   command: "ip link del kube-ipvs0"
   when:
     - kube_proxy_mode == 'ipvs'
     - kube_ipvs0.stat.exists
 
-- name: reset | check nodelocaldns network device
+- name: Reset | check nodelocaldns network device
   stat:
     path: /sys/class/net/nodelocaldns
     get_attributes: no
@@ -251,13 +252,13 @@
     get_mime: no
   register: nodelocaldns_device
 
-- name: reset | Remove nodelocaldns
+- name: Reset | Remove nodelocaldns
   command: "ip link del nodelocaldns"
   when:
     - enable_nodelocaldns | default(false) | bool
     - nodelocaldns_device.stat.exists
 
-- name: reset | Check whether /var/lib/kubelet directory exists
+- name: Reset | Check whether /var/lib/kubelet directory exists
   stat:
     path: /var/lib/kubelet
     get_attributes: no
@@ -265,7 +266,7 @@
     get_mime: no
   register: var_lib_kubelet_directory
 
-- name: reset | Find files/dirs with immutable flag in /var/lib/kubelet
+- name: Reset | Find files/dirs with immutable flag in /var/lib/kubelet
   command: lsattr -laR /var/lib/kubelet
   become: true
   register: var_lib_kubelet_files_dirs_w_attrs
@@ -273,7 +274,7 @@
   no_log: true
   when: var_lib_kubelet_directory.stat.exists
 
-- name: reset | Remove immutable flag from files/dirs in /var/lib/kubelet
+- name: Reset | Remove immutable flag from files/dirs in /var/lib/kubelet
   file:
     path: "{{ filedir_path }}"
     state: touch
@@ -287,7 +288,7 @@
     filedir_path: "{{ file_dir_line.split(' ')[0] }}"
   when: var_lib_kubelet_directory.stat.exists
 
-- name: reset | delete some files and directories
+- name: Reset | delete some files and directories
   file:
     path: "{{ item }}"
     state: absent
@@ -375,7 +376,7 @@
   tags:
     - files
 
-- name: reset | remove containerd binary files
+- name: Reset | remove containerd binary files
   file:
     path: "{{ containerd_bin_dir }}/{{ item }}"
     state: absent
@@ -395,7 +396,7 @@
   tags:
     - files
 
-- name: reset | remove dns settings from dhclient.conf
+- name: Reset | remove dns settings from dhclient.conf
   blockinfile:
     path: "{{ item }}"
     state: absent
@@ -408,7 +409,7 @@
     - files
     - dns
 
-- name: reset | remove host entries from /etc/hosts
+- name: Reset | remove host entries from /etc/hosts
   blockinfile:
     path: "/etc/hosts"
     state: absent
@@ -417,7 +418,7 @@
     - files
     - dns
 
-- name: reset | include file with reset tasks specific to the network_plugin if exists
+- name: Reset | include file with reset tasks specific to the network_plugin if exists
   include_role:
     name: "network_plugin/{{ kube_network_plugin }}"
     tasks_from: reset
@@ -426,7 +427,7 @@
   tags:
     - network
 
-- name: reset | Restart network
+- name: Reset | Restart network
   service:
     # noqa: jinja[spacing]
     name: >-
diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml
index fb33dcf93..434ef1eea 100644
--- a/roles/upgrade/post-upgrade/tasks/main.yml
+++ b/roles/upgrade/post-upgrade/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: wait for cilium
+- name: Wait for cilium
   when:
     - needs_cordoning | default(false)
     - kube_network_plugin == 'cilium'
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index 02fbe4694..923a6a85c 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -1,5 +1,6 @@
 ---
-- hosts: all
+- name: Collect debug info
+  hosts: all
   become: true
   gather_facts: no
 
@@ -104,7 +105,7 @@
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
 
   tasks:
-    - name: set etcd_access_addresses
+    - name: Set etcd_access_addresses
       set_fact:
         etcd_access_addresses: |-
           {% for item in groups['etcd'] -%}
diff --git a/test-infra/image-builder/cluster.yml b/test-infra/image-builder/cluster.yml
index a25de7ff4..4a622ca2f 100644
--- a/test-infra/image-builder/cluster.yml
+++ b/test-infra/image-builder/cluster.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: image-builder
+- name: Build KubeVirt images
+  hosts: image-builder
   gather_facts: false
   roles:
     - kubevirt-images
diff --git a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
index 68eb6cf81..99c1c1c87 100644
--- a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
+++ b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
@@ -47,12 +47,12 @@
   command: docker build -t {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
   loop: "{{ images | dict2items }}"
 
-- name: docker login
+- name: Docker login
   command: docker login -u="{{ docker_user }}" -p="{{ docker_password }}" "{{ docker_host }}"
 
-- name: docker push image
+- name: Docker push image
   command: docker push {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }}
   loop: "{{ images | dict2items }}"
 
-- name: docker logout
+- name: Docker logout
   command: docker logout -u="{{ docker_user }}" "{{ docker_host }}"
diff --git a/tests/cloud_playbooks/cleanup-packet.yml b/tests/cloud_playbooks/cleanup-packet.yml
index b709d6d0d..009071ec2 100644
--- a/tests/cloud_playbooks/cleanup-packet.yml
+++ b/tests/cloud_playbooks/cleanup-packet.yml
@@ -1,6 +1,7 @@
 ---
 
-- hosts: localhost
+- name: Cleanup Packet VMs
+  hosts: localhost
   gather_facts: no
   become: true
   roles:
diff --git a/tests/cloud_playbooks/create-aws.yml b/tests/cloud_playbooks/create-aws.yml
index a4628f424..3a31d29b1 100644
--- a/tests/cloud_playbooks/create-aws.yml
+++ b/tests/cloud_playbooks/create-aws.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Provision AWS VMs
+  hosts: localhost
   become: False
   gather_facts: False
 
diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml
index d675527f6..3c250620b 100644
--- a/tests/cloud_playbooks/create-do.yml
+++ b/tests/cloud_playbooks/create-do.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Provision DigitalOcean VMs
+  hosts: localhost
   become: false
   gather_facts: no
   vars:
@@ -47,15 +48,15 @@
     mode: default
 
   tasks:
-    - name: replace_test_id
+    - name: Replace test_id
       set_fact:
         test_name: "{{ test_id | regex_replace('\\.', '-') }}"
 
-    - name: show vars
+    - name: Show vars
       debug:
         msg: "{{ cloud_region }}, {{ cloud_image }}"
 
-    - name: set instance names
+    - name: Set instance names
       set_fact:
         # noqa: jinja[spacing]
         instance_names: >-
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index c3f17f450..78c96b085 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Provision Google Cloud VMs
+  hosts: localhost
   become: false
   gather_facts: no
   vars:
@@ -9,14 +10,14 @@
     ci_job_name: "{{ lookup('env', 'CI_JOB_NAME') }}"
     delete_group_vars: no
   tasks:
-    - name: include vars for test {{ ci_job_name }}
+    - name: Include vars for test {{ ci_job_name }}
       include_vars: "../files/{{ ci_job_name }}.yml"
 
-    - name: replace_test_id
+    - name: Replace test_id
       set_fact:
         test_name: "{{ test_id | regex_replace('\\.', '-') }}"
 
-    - name: set instance names
+    - name: Set instance names
       set_fact:
         # noqa: jinja[spacing]
         instance_names: >-
diff --git a/tests/cloud_playbooks/create-packet.yml b/tests/cloud_playbooks/create-packet.yml
index 0136ab38d..8212fb6c8 100644
--- a/tests/cloud_playbooks/create-packet.yml
+++ b/tests/cloud_playbooks/create-packet.yml
@@ -1,6 +1,7 @@
 ---
 
-- hosts: localhost
+- name: Provision Packet VMs
+  hosts: localhost
   gather_facts: no
   become: true
   vars:
diff --git a/tests/cloud_playbooks/delete-aws.yml b/tests/cloud_playbooks/delete-aws.yml
index e207a9844..cd5a20061 100644
--- a/tests/cloud_playbooks/delete-aws.yml
+++ b/tests/cloud_playbooks/delete-aws.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: kube_node
+- name: Terminate AWS VMs
+  hosts: kube_node
   become: False
 
   tasks:
diff --git a/tests/cloud_playbooks/delete-gce.yml b/tests/cloud_playbooks/delete-gce.yml
index f8c5d6e94..8752f2485 100644
--- a/tests/cloud_playbooks/delete-gce.yml
+++ b/tests/cloud_playbooks/delete-gce.yml
@@ -1,16 +1,17 @@
 ---
-- hosts: localhost
+- name: Terminate Google Cloud VMs
+  hosts: localhost
   become: false
   gather_facts: no
   vars:
     mode: default
 
   tasks:
-    - name: replace_test_id
+    - name: Replace test_id
       set_fact:
         test_name: "{{ test_id | regex_replace('\\.', '-') }}"
 
-    - name: set instance names
+    - name: Set instance names
       set_fact:
         # noqa: jinja[spacing]
         instance_names: >-
@@ -20,7 +21,7 @@
           k8s-{{ test_name }}-1,k8s-{{ test_name }}-2
           {%- endif -%}
 
-    - name: stop gce instances  # noqa args[module] - Probably doesn't work
+    - name: Stop GCE instances  # noqa args[module] - Probably doesn't work
       google.cloud.gcp_compute_instance:
         instance_names: "{{ instance_names }}"
         image: "{{ cloud_image | default(omit) }}"
@@ -34,7 +35,7 @@
       poll: 3
       register: gce
 
-    - name: delete gce instances  # noqa args[module] - Probably doesn't work
+    - name: Delete GCE instances  # noqa args[module] - Probably doesn't work
       google.cloud.gcp_compute_instance:
         instance_names: "{{ instance_names }}"
         image: "{{ cloud_image | default(omit) }}"
diff --git a/tests/cloud_playbooks/delete-packet.yml b/tests/cloud_playbooks/delete-packet.yml
index 3895263ce..7d0c9003c 100644
--- a/tests/cloud_playbooks/delete-packet.yml
+++ b/tests/cloud_playbooks/delete-packet.yml
@@ -1,6 +1,7 @@
 ---
 
-- hosts: localhost
+- name: Terminate Packet VMs
+  hosts: localhost
   gather_facts: no
   become: true
   vars:
diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/main.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/main.yml
index 37e61cd62..633c87253 100644
--- a/tests/cloud_playbooks/roles/packet-ci/tasks/main.yml
+++ b/tests/cloud_playbooks/roles/packet-ci/tasks/main.yml
@@ -7,12 +7,15 @@
   set_fact:
     vm_count: "{%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale', 'ha-recover', 'ha-recover-noquorum'] -%}{{ 3 | int }}{%- elif mode == 'aio' -%}{{ 1 | int }}{%- else -%}{{ 2 | int }}{%- endif -%}"
 
-- import_tasks: cleanup-old-vms.yml
+- name: Cleanup old VMs
+  import_tasks: cleanup-old-vms.yml
 
-- import_tasks: create-vms.yml
+- name: Create VMs
+  import_tasks: create-vms.yml
   when:
     - not vm_cleanup
 
-- import_tasks: delete-vms.yml
+- name: Delete VMs
+  import_tasks: delete-vms.yml
   when:
     - vm_cleanup | default(false)
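
As an aside, the inline Jinja conditional computing vm_count in the context above is hard to scan; an equivalent, arguably clearer formulation (a sketch, not part of this patch; the variable name three_node_modes is hypothetical) moves the mode list into vars:

    - name: Set VM count per mode
      vars:
        three_node_modes: ['separate', 'separate-scale', 'ha', 'ha-scale', 'ha-recover', 'ha-recover-noquorum']
      set_fact:
        vm_count: "{{ 3 if mode in three_node_modes else (1 if mode == 'aio' else 2) }}"
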
diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml
index 73ae6c5f5..cae06f2f5 100644
--- a/tests/cloud_playbooks/upload-logs-gcs.yml
+++ b/tests/cloud_playbooks/upload-logs-gcs.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Upload logs to GCS
+  hosts: localhost
   become: false
   gather_facts: no
 
@@ -12,7 +13,7 @@
       changed_when: false
       register: out
 
-    - name: replace_test_id
+    - name: Replace test_id
       set_fact:
         test_name: "kargo-ci-{{ out.stdout_lines[0] }}"
 
diff --git a/tests/cloud_playbooks/wait-for-ssh.yml b/tests/cloud_playbooks/wait-for-ssh.yml
index 7c439d9a1..0e09c9f04 100644
--- a/tests/cloud_playbooks/wait-for-ssh.yml
+++ b/tests/cloud_playbooks/wait-for-ssh.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: all
+- name: Wait until SSH is available
+  hosts: all
   become: False
   gather_facts: False
 
diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml
index 961df9bc4..0d20bda02 100644
--- a/tests/testcases/010_check-apiserver.yml
+++ b/tests/testcases/010_check-apiserver.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: kube_control_plane
+- name: Testcases for apiserver
+  hosts: kube_control_plane
 
   tasks:
   - name: Check the API servers are responding
diff --git a/tests/testcases/015_check-nodes-ready.yml b/tests/testcases/015_check-nodes-ready.yml
index bb2f28323..69945ca58 100644
--- a/tests/testcases/015_check-nodes-ready.yml
+++ b/tests/testcases/015_check-nodes-ready.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: kube_control_plane[0]
+- name: Testcases checking nodes
+  hosts: kube_control_plane[0]
   tasks:
 
   - name: Force binaries directory for Flatcar Container Linux by Kinvolk
diff --git a/tests/testcases/020_check-pods-running.yml b/tests/testcases/020_check-pods-running.yml
index d1dc17c2a..54f39ea97 100644
--- a/tests/testcases/020_check-pods-running.yml
+++ b/tests/testcases/020_check-pods-running.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: kube_control_plane[0]
+- name: Testcases checking pods
+  hosts: kube_control_plane[0]
   tasks:
 
   - name: Force binaries directory for Flatcar Container Linux by Kinvolk
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index b5f1c2b6e..3b56940c5 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: kube_control_plane[0]
+- name: Testcases for network
+  hosts: kube_control_plane[0]
   vars:
     test_image_repo: registry.k8s.io/e2e-test-images/agnhost
     test_image_tag: "2.40"
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index 0542e1245..4fc70eb07 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: kube_node
+- name: Testcases for Calico
+  hosts: kube_node
   tasks:
     - name: Test tunl0 routes
       shell: "set -o pipefail && ! /sbin/ip ro | grep '/{{ calico_pool_blocksize }} | default(26) via' | grep -v tunl0"
@@ -9,7 +10,8 @@
         - (calico_ipip_mode is defined and calico_ipip_mode != 'Never' or cloud_provider is defined)
         - kube_network_plugin | default('calico') == 'calico'
 
-- hosts: k8s_cluster
+- name: Advanced testcases for network
+  hosts: k8s_cluster
   vars:
     agent_report_interval: 10
     netcheck_namespace: default
diff --git a/tests/testcases/100_check-k8s-conformance.yml b/tests/testcases/100_check-k8s-conformance.yml
index a64ef11de..0247793db 100644
--- a/tests/testcases/100_check-k8s-conformance.yml
+++ b/tests/testcases/100_check-k8s-conformance.yml
@@ -1,5 +1,6 @@
 ---
-- hosts: kube_control_plane[0]
+- name: Testcases for Kubernetes conformance
+  hosts: kube_control_plane[0]
   vars:
     sonobuoy_version: 0.56.11
     sonobuoy_arch: amd64
-- 
GitLab