diff --git a/.ansible-lint b/.ansible-lint
index c44f782b662f3e427bcafb6c88b115f2c29bebed..ede661355fb64511668959528997963dccadbef6 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -9,6 +9,5 @@ skip_list:
   - '305'
   - '306'
   - '404'
-  - '502'
   - '503'
   - '701'
diff --git a/contrib/azurerm/roles/generate-inventory/tasks/main.yml b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
index 409555fd00fe78c40212800cffb1b8d47da0cb6d..20a06e10c4037645982e7d65a68aa66e32e24f51 100644
--- a/contrib/azurerm/roles/generate-inventory/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
@@ -4,7 +4,8 @@
   command: azure vm list-ip-address --json {{ azure_resource_group }}
   register: vm_list_cmd
 
-- set_fact:
+- name: Set vm_list
+  set_fact:
     vm_list: "{{ vm_list_cmd.stdout }}"
 
 - name: Generate inventory
diff --git a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
index 1772b1c29298dfcc7886e74fd6b33fa8453616bf..f639e64c72b6a6a0ac76d917a7465f7779232329 100644
--- a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
@@ -8,7 +8,8 @@
   command: az vm list -o json --resource-group {{ azure_resource_group }}
   register: vm_list_cmd
 
-- set_fact:
+- name: Set VM IP and roles lists
+  set_fact:
     vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
     vm_roles_list: "{{ vm_list_cmd.stdout }}"
 
diff --git a/contrib/azurerm/roles/generate-templates/tasks/main.yml b/contrib/azurerm/roles/generate-templates/tasks/main.yml
index 92a0e87c9f8a32c98aed1e435d62627dd15feaf1..489250a98bf312010b3caed4b56568da88f7e911 100644
--- a/contrib/azurerm/roles/generate-templates/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-templates/tasks/main.yml
@@ -1,13 +1,16 @@
 ---
-- set_fact:
+- name: Set base_dir
+  set_fact:
     base_dir: "{{ playbook_dir }}/.generated/"
 
-- file:
+- name: Create base_dir
+  file:
     path: "{{ base_dir }}"
     state: directory
     recurse: true
 
-- template:
+- name: Store json files in base_dir
+  template:
     src: "{{ item }}"
     dest: "{{ base_dir }}/{{ item }}"
   with_items:
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
index 0368f4e7b0af9af5dcd1f94943ce5d053ff66e9b..f0111cec01a63cb42b4af0036a34168b6082af99 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
@@ -4,6 +4,7 @@
   register: "initial_heketi_state"
   changed_when: false
   command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
+
 - name: "Bootstrap heketi."
   when:
     - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
@@ -16,15 +17,20 @@
   register: "initial_heketi_pod"
   command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json"
   changed_when: false
+
 - name: "Ensure heketi bootstrap pod is up."
   assert:
     that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
-- set_fact:
+
+- name: Store the initial heketi pod name
+  set_fact:
     initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"
+
 - name: "Test heketi topology."
   changed_when: false
   register: "heketi_topology"
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
+
 - name: "Load heketi topology."
   when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
   include_tasks: "bootstrap/topology.yml"
@@ -42,6 +48,7 @@
   command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
   changed_when: false
   register: "heketi_storage_state"
+
 # ensure endpoints actually exist before trying to move database data to it
 - name: "Create heketi storage."
   include_tasks: "bootstrap/storage.yml"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml
index 61729a5e29b207aeec550c143b03b4d4930dc049..ae598c3dfcbc976bad0f5daa55b5e9a28597cb35 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml
@@ -1,11 +1,19 @@
 ---
-- register: "label_present"
+- name: Get storage nodes
+  register: "label_present"
   command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
   changed_when: false
+
 - name: "Assign storage label"
   when: "label_present.stdout_lines|length == 0"
   command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
-- register: "label_present"
+
+- name: Get storage nodes again
+  register: "label_present"
   command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
   changed_when: false
-- assert: { that: "label_present|length > 0", msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs." }
+
+- name: Ensure the label has been set
+  assert:
+    that: "label_present|length > 0"
+    msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
index d322f6ff8dceb5327846625a760908f721817f49..7b6d37d24aa60bbd6ededea0f0f16d0b72819187 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
@@ -1,19 +1,24 @@
 ---
 - name: "Kubernetes Apps | Lay Down Heketi"
   become: true
-  template: { src: "heketi-deployment.json.j2", dest: "{{ kube_config_dir }}/heketi-deployment.json" }
+  template:
+    src: "heketi-deployment.json.j2"
+    dest: "{{ kube_config_dir }}/heketi-deployment.json"
   register: "rendering"
+
 - name: "Kubernetes Apps | Install and configure Heketi"
   kube:
     name: "GlusterFS"
     kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-deployment.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
+
 - name: "Ensure heketi is up and running."
   changed_when: false
   register: "heketi_state"
   vars:
-    heketi_state: { stdout: "{}" }
+    heketi_state:
+      stdout: "{}"
     pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
     deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
   command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
@@ -22,5 +27,7 @@
     - "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
   retries: 60
   delay: 5
-- set_fact:
+
+- name: Set the Heketi pod name
+  set_fact:
     heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
index 96f2430485f36c55aba736594e7f42970f3a0781..3615f7c6d4329725031d0b7bf1bf14902995e989 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
@@ -1,31 +1,44 @@
 ---
-- register: "clusterrolebinding_state"
+- name: Get clusterrolebindings
+  register: "clusterrolebinding_state"
   command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
   changed_when: false
+
 - name: "Kubernetes Apps | Deploy cluster role binding."
   when: "clusterrolebinding_state.stdout == \"\""
   command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
-- register: "clusterrolebinding_state"
+
+- name: Get clusterrolebindings again
+  register: "clusterrolebinding_state"
   command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
   changed_when: false
-- assert:
+
+- name: Make sure the cluster role binding is present now
+  assert:
     that: "clusterrolebinding_state.stdout != \"\""
     msg: "Cluster role binding is not present."
 
-- register: "secret_state"
+- name: Get the heketi-config-secret secret
+  register: "secret_state"
   command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
   changed_when: false
+
 - name: "Render Heketi secret configuration."
   become: true
   template:
     src: "heketi.json.j2"
     dest: "{{ kube_config_dir }}/heketi.json"
+
 - name: "Deploy Heketi config secret"
   when: "secret_state.stdout == \"\""
   command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
-- register: "secret_state"
+
+- name: Get the heketi-config-secret secret again
+  register: "secret_state"
   command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
   changed_when: false
-- assert:
+
+- name: Make sure the heketi-config-secret secret exists now
+  assert:
     that: "secret_state.stdout != \"\""
     msg: "Heketi config secret is not present."
diff --git a/roles/bastion-ssh-config/tasks/main.yml b/roles/bastion-ssh-config/tasks/main.yml
index 71c96db2119818c5ef5cc0ba8fd547d03462da53..7ea39bbd8cd62197ce82a187aeb5ba1e7c03eae5 100644
--- a/roles/bastion-ssh-config/tasks/main.yml
+++ b/roles/bastion-ssh-config/tasks/main.yml
@@ -1,11 +1,13 @@
 ---
-- set_fact:
+- name: set bastion host IP
+  set_fact:
     bastion_ip: "{{ hostvars[groups['bastion'][0]]['ansible_host'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_host']) }}"
   delegate_to: localhost
 
 # As we are actually running on localhost, the ansible_ssh_user is your local user when you try to use it directly
 # To figure out the real ssh user, we delegate this task to the bastion and store the ansible_user in real_user
-- set_fact:
+- name: Store the current ansible_user in the real_user fact
+  set_fact:
     real_user: "{{ ansible_user }}"
 
 - name: create ssh bastion conf
diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml
index 0b979cc7d3b0c32cb98abafeae08c14b6db2c934..2530a29ef7a20c7e483f8c071598087b561825fa 100644
--- a/roles/container-engine/docker/tasks/main.yml
+++ b/roles/container-engine/docker/tasks/main.yml
@@ -4,7 +4,8 @@
     path: /run/ostree-booted
   register: ostree
 
-- set_fact:
+- name: set is_atomic
+  set_fact:
     is_atomic: "{{ ostree.stat.exists }}"
 
 - name: gather os specific variables
diff --git a/roles/download/tasks/kubeadm_images.yml b/roles/download/tasks/kubeadm_images.yml
index 8257bccc120007d84b6c918ad636fb4a6ed105c7..079dd7509e580f395a562e93f1a1eb10fbf2863f 100644
--- a/roles/download/tasks/kubeadm_images.yml
+++ b/roles/download/tasks/kubeadm_images.yml
@@ -49,7 +49,8 @@
   when: download_run_once
   changed_when: false
 
-- vars:
+- name: container_download | extract container names from list of kubeadm config images
+  vars:
     kubeadm_images_list: "{{ result.stdout_lines }}"
   set_fact:
     kubeadm_image:
@@ -66,7 +67,8 @@
   when: download_run_once
   register: result_images
 
-- set_fact:
+- name: container_download | set kubeadm_images
+  set_fact:
     kubeadm_images: "{{ result_images.results | map(attribute='ansible_facts.kubeadm_image') | list | items2dict }}"
   run_once: true
   when: download_run_once
diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml
index f9764ae6afc1963cb2243fc705bf3cf50208cbdc..174085f2d2a2e942b3f046ade634adaa55bcb420 100644
--- a/roles/download/tasks/main.yml
+++ b/roles/download/tasks/main.yml
@@ -9,7 +9,8 @@
     - not skip_downloads|default(false)
     - inventory_hostname in groups['kube-master']
 
-- set_fact:
+- name: Set kubeadm_images
+  set_fact:
     kubeadm_images: {}
   when:
     - kubeadm_images is not defined
diff --git a/roles/download/tasks/set_docker_image_facts.yml b/roles/download/tasks/set_docker_image_facts.yml
index 72d898df13dd0bdabdd342b24025adde9cb2259e..3695a38685fe711c70703f26ee2b971009f268c7 100644
--- a/roles/download/tasks/set_docker_image_facts.yml
+++ b/roles/download/tasks/set_docker_image_facts.yml
@@ -1,9 +1,11 @@
 ---
-- set_fact:
+- name: Set if containers should be pulled by digest
+  set_fact:
     pull_by_digest: >-
       {%- if download.sha256 is defined and download.sha256 -%}true{%- else -%}false{%- endif -%}
 
-- set_fact:
+- name: Set pull_args
+  set_fact:
     pull_args: >-
       {%- if pull_by_digest %}{{ download.repo }}@sha256:{{ download.sha256 }}{%- else -%}{{ download.repo }}:{{ download.tag }}{%- endif -%}
 
@@ -19,7 +21,8 @@
     - not download_always_pull
     - group_names | intersect(download.groups) | length
 
-- set_fact:
+- name: Set if pull is required per container
+  set_fact:
     pull_required: >-
       {%- if pull_args in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%}
   when:
diff --git a/roles/download/tasks/sync_container.yml b/roles/download/tasks/sync_container.yml
index fd46766ee5c3c705809551f25952da35054e33ed..ac0cf9dd04ae2b210b878873218ae8aecbcf70bb 100644
--- a/roles/download/tasks/sync_container.yml
+++ b/roles/download/tasks/sync_container.yml
@@ -7,14 +7,14 @@
   tags:
     - facts
 
-- set_fact:
+- name: container_download | Set file name of container tarballs
+  set_fact:
     fname: "{{ local_release_dir }}/containers/{{ download.repo|regex_replace('/|\0|:', '_') }}:{{ download.tag|default(download.sha256)|regex_replace('/|\0|:', '_') }}.tar"
   run_once: true
   when:
     - download.enabled
     - download.container
     - download_run_once
-
   tags:
     - facts
 
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index 66b2030a5381f45769b88e1800e27e0187219bc7..d882b0f946e635699ebb39cab5ea21b84f5ad395 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -109,7 +109,8 @@
   loop_control:
     label: "{{ item.item }}"
 
-- set_fact:
+- name: Gen_certs | Set cert names per node
+  set_fact:
     my_etcd_node_certs: ['ca.pem',
                          'node-{{ inventory_hostname }}.pem',
                          'node-{{ inventory_hostname }}-key.pem']
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index c729b880d5e5402ea266dcf82c0360f8b322812e..30112176ea6f0b91664805c3af2f81c07e01ddf6 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -1,5 +1,6 @@
 ---
-- set_fact:
+- name: set architecture_groups
+  set_fact:
     architecture_groups:
       x86_64: amd64
       aarch64: arm64
diff --git a/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
index 053fbc0db44ac4b99a27da672e2dbb5b55dbceaf..f6500f7c1cd91d77dff7334240568ebaad0dad15 100644
--- a/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
+++ b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
@@ -37,7 +37,8 @@
   delegate_to: "{{ groups['kube-master'][0] }}"
   command: "{{ helm_script_dir }}/helm-make-ssl.sh -e {{ helm_home_dir }} -d {{ helm_tiller_cert_dir }}"
 
-- set_fact:
+- name: Check_helm_client_certs | Set helm_client_certs
+  set_fact:
     helm_client_certs: ['ca.pem', 'cert.pem', 'key.pem']
 
 - name: "Check_helm_client_certs | check if a cert already exists on master node"
diff --git a/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml b/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml
index a080aa4f063c00fdaf0c316e86714b9c8926089f..9611d1a47859eb67fa5a91e8106d27cae8aade94 100644
--- a/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml
+++ b/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml
@@ -16,7 +16,8 @@
   run_once: true
   changed_when: false
 
-- set_fact:
+- name: Contiv | Set contiv_global_config
+  set_fact:
     contiv_global_config: "{{ (global_config.stdout|from_json)[0] }}"
 
 - name: Contiv | Set global forwarding mode
diff --git a/roles/kubernetes/node/tasks/facts.yml b/roles/kubernetes/node/tasks/facts.yml
index 98a6ba73f1374cbe7af8df451d8b6c2d46d6ed85..6f8539c0ec098e88a60d5dcfca7a19cee926d630 100644
--- a/roles/kubernetes/node/tasks/facts.yml
+++ b/roles/kubernetes/node/tasks/facts.yml
@@ -4,7 +4,8 @@
   register: docker_cgroup_driver_result
   changed_when: false
 
-- set_fact:
+- name: set kubelet facts
+  set_fact:
     standalone_kubelet: >-
       {%- if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] -%}true{%- else -%}false{%- endif -%}
     kubelet_cgroup_driver_detected: "{{ docker_cgroup_driver_result.stdout }}"
diff --git a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
index c06207fd0692342b8ff1cfd5b691adf242c4c3fc..7fa7507ae899adf894bb0e5b1eab7a7c0168e9db 100644
--- a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
@@ -1,5 +1,6 @@
 ---
-- set_fact:
+- name: set architecture_groups
+  set_fact:
     architecture_groups:
       x86_64: amd64
       aarch64: arm64
@@ -25,10 +26,12 @@
     path: /run/ostree-booted
   register: ostree
 
-- set_fact:
+- name: set is_atomic
+  set_fact:
     is_atomic: "{{ ostree.stat.exists }}"
 
-- set_fact:
+- name: set kube_cert_group on atomic hosts
+  set_fact:
     kube_cert_group: "kube"
   when: is_atomic
 
@@ -39,11 +42,10 @@
   changed_when: false
   check_mode: no
 
-- set_fact:
+- name: set dns facts
+  set_fact:
     resolvconf: >-
       {%- if resolvconf.rc == 0 -%}true{%- else -%}false{%- endif -%}
-
-- set_fact:
     bogus_domains: |-
       {% for d in [ 'default.svc.' + dns_domain, 'svc.' + dns_domain ] + searchdomains|default([]) -%}
       {{ dns_domain }}.{{ d }}./{{ d }}.{{ d }}./com.{{ d }}./
diff --git a/roles/network_plugin/contiv/tasks/main.yml b/roles/network_plugin/contiv/tasks/main.yml
index d626cbd689f8ba01316324c0f9cb240821fd7405..dafe2d7aebee5409920d41f78a4d9c85d2b9f02c 100644
--- a/roles/network_plugin/contiv/tasks/main.yml
+++ b/roles/network_plugin/contiv/tasks/main.yml
@@ -56,7 +56,8 @@
       - {name: contiv-netplugin, file: contiv-netplugin.yml, type: daemonset}
   when: inventory_hostname in groups['kube-master']
 
-- set_fact:
+- name: Contiv | Add contiv-api-proxy manifest if contiv_enable_api_proxy is true
+  set_fact:
     contiv_manifests: |-
       {% set _ = contiv_manifests.append({"name": "contiv-api-proxy", "file": "contiv-api-proxy.yml", "type": "daemonset"}) %}
       {{ contiv_manifests }}
diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml
index 242834088881c329de90f4e6c0d69a17e983d38d..a8b149394cb7034ed0ab57e24388d63506dbdf81 100644
--- a/roles/upgrade/pre-upgrade/tasks/main.yml
+++ b/roles/upgrade/pre-upgrade/tasks/main.yml
@@ -21,7 +21,8 @@
   failed_when: false
   changed_when: false
 
-- set_fact:
+- name: Set if node needs cordoning
+  set_fact:
     needs_cordoning: >-
       {% if kubectl_node_ready.stdout == "True" and not kubectl_node_schedulable.stdout -%}
       true
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index 9ba68c0e15b242d40b12cedcaf5153d4db9eafd3..15f1c627fef701e1571ac2d034b05dd3b8819e7b 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -101,7 +101,8 @@
     ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
 
   tasks:
-    - set_fact:
+    - name: set etcd_access_addresses
+      set_fact:
         etcd_access_addresses: |-
           {% for item in groups['etcd'] -%}
             https://{{ item }}:2379{% if not loop.last %},{% endif %}
diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml
index 55932a408ceca589307f154a51d203be5eb48bf6..37fbafbd6106ff77ef50a4ee88f99cc6d20f34b2 100644
--- a/tests/cloud_playbooks/create-do.yml
+++ b/tests/cloud_playbooks/create-do.yml
@@ -57,7 +57,8 @@
     - name: show vars
       debug: msg="{{ cloud_region }}, {{ cloud_image }}"
 
-    - set_fact:
+    - name: set instance names
+      set_fact:
         instance_names: >-
           {%- if mode in ['separate', 'ha'] -%}
           ["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2", "k8s-{{ test_name }}-3"]
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index 61b3d852f8bc93a7079109840b9ef06417bc32c5..2664810799bb83c2c327e182cabb9a8511303e54 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -16,7 +16,8 @@
       set_fact:
         test_name: "{{ test_id |regex_replace('\\.', '-') }}"
 
-    - set_fact:
+    - name: set instance names
+      set_fact:
         instance_names: >-
           {%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale'] -%}
           k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3
diff --git a/tests/cloud_playbooks/delete-gce.yml b/tests/cloud_playbooks/delete-gce.yml
index a5b4a6e4d631fb66a5c71bf4cc10ba35f4cd9cee..ba50f92e6772903fc570df24a4e09df40c63314e 100644
--- a/tests/cloud_playbooks/delete-gce.yml
+++ b/tests/cloud_playbooks/delete-gce.yml
@@ -10,7 +10,8 @@
       set_fact:
         test_name: "{{ test_id |regex_replace('\\.', '-') }}"
 
-    - set_fact:
+    - name: set instance names
+      set_fact:
         instance_names: >-
           {%- if mode in ['separate', 'ha'] -%}
           k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3
diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml
index 39cec6f6a2dad3b639dedea8fc80e0e8b546b5b3..f1e3cbaca52ba8d6567e6b113a991f6605aac951 100644
--- a/tests/cloud_playbooks/upload-logs-gcs.yml
+++ b/tests/cloud_playbooks/upload-logs-gcs.yml
@@ -15,7 +15,8 @@
       set_fact:
         test_name: "kargo-ci-{{ out.stdout_lines[0] }}"
 
-    - set_fact:
+    - name: Set file_name for logs
+      set_fact:
         file_name: "{{ ostype }}-{{ kube_network_plugin }}-{{ commit }}-logs.tar.gz"
 
     - name: Create a bucket
diff --git a/tests/testcases/015_check-pods-running.yml b/tests/testcases/015_check-pods-running.yml
index 28c5d80167c32a4b9ec9aabc336f7ba90a62de72..c24e00aca23d325d3b0b469bfbd18913a13b5413 100644
--- a/tests/testcases/015_check-pods-running.yml
+++ b/tests/testcases/015_check-pods-running.yml
@@ -7,7 +7,8 @@
       bin_dir: "/opt/bin"
     when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
-  - set_fact:
+  - name: Force binaries directory for other hosts
+    set_fact:
       bin_dir: "/usr/local/bin"
     when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
@@ -16,7 +17,8 @@
     register: get_pods
     no_log: true
 
-  - debug: msg="{{ get_pods.stdout.split('\n') }}"
+  - debug:
+      msg: "{{ get_pods.stdout.split('\n') }}"
 
   - name: Check that all pods are running and ready
     shell: "{{ bin_dir }}/kubectl get pods --all-namespaces --no-headers -o yaml"
@@ -36,5 +38,6 @@
     register: get_pods
     no_log: true
 
-  - debug: msg="{{ get_pods.stdout.split('\n') }}"
+  - debug:
+      msg: "{{ get_pods.stdout.split('\n') }}"
     failed_when: not run_pods_log is success
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index c9d0f8c43bc533f8db0d395f31e174d7829049a2..6a1fa5c52b607a1b79399e63ec95aff615271545 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -10,7 +10,8 @@
       bin_dir: "/opt/bin"
     when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
-  - set_fact:
+  - name: Force binaries directory for other hosts
+    set_fact:
       bin_dir: "/usr/local/bin"
     when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
@@ -38,7 +39,8 @@
     register: pods
     no_log: true
 
-  - debug: msg="{{ pods.stdout.split('\n') }}"
+  - debug:
+      msg: "{{ pods.stdout.split('\n') }}"
     failed_when: not run_pods_log is success
 
   - name: Get hostnet pods
@@ -58,9 +60,11 @@
     register: get_pods
     no_log: true
 
-  - debug: msg="{{ get_pods.stdout.split('\n') }}"
+  - debug:
+      msg: "{{ get_pods.stdout.split('\n') }}"
 
-  - set_fact:
+  - name: Set networking facts
+    set_fact:
       kube_pods_subnet: 10.233.64.0/18
       pod_names: "{{ (pods.stdout | from_json)['items'] | map(attribute = 'metadata.name') | list }}"
       pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute = 'status.podIP') | list }}"
@@ -74,19 +78,25 @@
   - name: Check pods IP are in correct network
     assert:
       that: item | ipaddr(kube_pods_subnet)
-    when: not item in pods_hostnet and item in pods_running
+    when:
+    - not item in pods_hostnet
+    - item in pods_running
     with_items: "{{ pod_ips }}"
 
   - name: Ping between pods is working
     shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
-    when: not item[0] in pods_hostnet and not item[1] in pods_hostnet
+    when:
+    - not item[0] in pods_hostnet
+    - not item[1] in pods_hostnet
     with_nested:
     - "{{ pod_names }}"
     - "{{ pod_ips }}"
 
   - name: Ping between hostnet pods is working
     shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
-    when: item[0] in pods_hostnet and item[1] in pods_hostnet
+    when:
+    - item[0] in pods_hostnet
+    - item[1] in pods_hostnet
     with_nested:
     - "{{ pod_names }}"
     - "{{ pod_ips }}"
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index c1264f8422a8080e85700f05b94a59ee74c0b9cf..fe4e552c741a2c28adeb2b404a6094c971ea39c7 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -19,7 +19,8 @@
         bin_dir: "/opt/bin"
       when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
-    - set_fact:
+    - name: Force binaries directory on other hosts
+      set_fact:
         bin_dir: "/usr/local/bin"
       when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
@@ -42,7 +43,8 @@
       delay: 10
       failed_when: false
 
-    - command: "{{ bin_dir }}/kubectl -n {{ netcheck_namespace }} describe pod -l app={{ item }}"
+    - name: Describe netchecker pods
+      command: "{{ bin_dir }}/kubectl -n {{ netcheck_namespace }} describe pod -l app={{ item }}"
       run_once: true
       delegate_to: "{{ groups['kube-master'][0] }}"
       no_log: false
@@ -51,12 +53,15 @@
         - netchecker-agent-hostnet
       when: not nca_pod is success
 
-    - debug: var=nca_pod.stdout_lines
+    - debug:
+        var: nca_pod.stdout_lines
       failed_when: not nca_pod is success
       run_once: true
 
     - name: Get netchecker agents
-      uri: url=http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/ return_content=yes
+      uri:
+        url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/"
+        return_content: yes
       run_once: true
       delegate_to: "{{ groups['kube-master'][0] }}"
       register: agents
@@ -68,7 +73,8 @@
       failed_when: false
       no_log: true
 
-    - debug: var=agents.content|from_json
+    - debug:
+        var: agents.content | from_json
       failed_when: not agents is success and not agents.content=='{}'
       run_once: true
       when:
@@ -77,7 +83,10 @@
         - agents.content[0] == '{'
 
     - name: Check netchecker status
-      uri: url=http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check status_code=200 return_content=yes
+      uri:
+        url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check"
+        status_code: 200
+        return_content: yes
       delegate_to: "{{ groups['kube-master'][0] }}"
       run_once: true
       register: result
@@ -90,17 +99,20 @@
       when:
         - agents.content != '{}'
 
-    - debug: var=ncs_pod
+    - debug:
+        var: ncs_pod
       run_once: true
       when: not result is success
 
-    - command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy"
+    - name: Get kube-proxy logs
+      command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy"
       run_once: true
       when: not result is success
       delegate_to: "{{ groups['kube-master'][0] }}"
       no_log: false
 
-    - command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers"
+    - name: Get logs from other apps
+      command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers"
       run_once: true
       when: not result is success
       delegate_to: "{{ groups['kube-master'][0] }}"
@@ -115,7 +127,8 @@
         - calico-node
         - cilium
 
-    - debug: var=result.content|from_json
+    - debug:
+        var: result.content | from_json
       failed_when: not result is success
       run_once: true
       when:
@@ -123,13 +136,15 @@
         - result.content
         - result.content[0] == '{'
 
-    - debug: var=result
+    - debug:
+        var: result
       failed_when: not result is success
       run_once: true
       when:
         - not agents.content == '{}'
 
-    - debug: msg="Cannot get reports from agents, consider as PASSING"
+    - debug:
+        msg: "Cannot get reports from agents, consider as PASSING"
       run_once: true
       when:
         - agents.content == '{}'