diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 0b05f033586bf7de05bf77104b8e66b77eb84047..8818ffabaf5f915f0903f48b77bbff6171822c37 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,3 +1,4 @@
+---
 stages:
   - unit-tests
   - moderator
@@ -8,7 +9,7 @@ stages:
 variables:
   FAILFASTCI_NAMESPACE: 'kargo-ci'
   GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
-#  DOCKER_HOST: tcp://localhost:2375
+  # DOCKER_HOST: tcp://localhost:2375
   ANSIBLE_FORCE_COLOR: "true"
   MAGIC: "ci check this"
   TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
@@ -34,8 +35,8 @@ variables:
 # us-west1-a
 
 before_script:
-    - /usr/bin/python -m pip install -r tests/requirements.txt
-    - mkdir -p /.ssh
+  - /usr/bin/python -m pip install -r tests/requirements.txt
+  - mkdir -p /.ssh
 
 .job: &job
   tags:
@@ -45,7 +46,7 @@ before_script:
 
 .docker_service: &docker_service
   services:
-     - docker:dind
+    - docker:dind
 
 .create_cluster: &create_cluster
   <<: *job
@@ -232,95 +233,95 @@ before_script:
 
 # Test matrix. Leave the comments for markup scripts.
 .coreos_calico_aio_variables: &coreos_calico_aio_variables
-# stage: deploy-part1
+  # stage: deploy-part1
   MOVED_TO_GROUP_VARS: "true"
 
 .ubuntu18_flannel_aio_variables: &ubuntu18_flannel_aio_variables
-# stage: deploy-part1
+  # stage: deploy-part1
   MOVED_TO_GROUP_VARS: "true"
 
 .centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
-# stage: deploy-part1
+  # stage: deploy-part1
   UPGRADE_TEST: "graceful"
 
 .ubuntu_canal_kubeadm_variables: &ubuntu_canal_kubeadm_variables
-# stage: deploy-part1
+  # stage: deploy-part1
   MOVED_TO_GROUP_VARS: "true"
 
 .ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
-# stage: deploy-special
+  # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
 
 .ubuntu_contiv_sep_variables: &ubuntu_contiv_sep_variables
-# stage: deploy-special
+  # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
 
 .coreos_cilium_variables: &coreos_cilium_variables
-# stage: deploy-special
+  # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
 
 .ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables
-# stage: deploy-special
+  # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
 
 .rhel7_weave_variables: &rhel7_weave_variables
-# stage: deploy-part1
+  # stage: deploy-part1
   MOVED_TO_GROUP_VARS: "true"
 
 .centos7_flannel_addons_variables: &centos7_flannel_addons_variables
-# stage: deploy-part2
+  # stage: deploy-part2
   MOVED_TO_GROUP_VARS: "true"
 
 .debian9_calico_variables: &debian9_calico_variables
-# stage: deploy-part2
+  # stage: deploy-part2
   MOVED_TO_GROUP_VARS: "true"
 
 .coreos_canal_variables: &coreos_canal_variables
-# stage: deploy-part2
+  # stage: deploy-part2
   MOVED_TO_GROUP_VARS: "true"
 
 .rhel7_canal_sep_variables: &rhel7_canal_sep_variables
-# stage: deploy-special
+  # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
 
 .ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
-# stage: deploy-special
+  # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
 
 .centos7_calico_ha_variables: &centos7_calico_ha_variables
-# stage: deploy-special
+  # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
 
 .centos7_kube_router_variables: &centos7_kube_router_variables
-# stage: deploy-special
+  # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
 
 .centos7_multus_calico_variables: &centos7_multus_calico_variables
-# stage: deploy-part2
+  # stage: deploy-part2
   UPGRADE_TEST: "graceful"
 
 .coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
-# stage: deploy-special
+  # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
 
 .coreos_kube_router_variables: &coreos_kube_router_variables
-# stage: deploy-special
+  # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
 
 .ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
-# stage: deploy-part1
+  # stage: deploy-part1
   MOVED_TO_GROUP_VARS: "true"
 
 .ubuntu_flannel_variables: &ubuntu_flannel_variables
-# stage: deploy-part2
+  # stage: deploy-part2
   MOVED_TO_GROUP_VARS: "true"
 
 .ubuntu_kube_router_variables: &ubuntu_kube_router_variables
-# stage: deploy-special
+  # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
 
 .opensuse_canal_variables: &opensuse_canal_variables
-# stage: deploy-part2
+  # stage: deploy-part2
   MOVED_TO_GROUP_VARS: "true"
 
 
@@ -727,7 +728,7 @@ yamllint:
   <<: *job
   stage: unit-tests
   script:
-    - yamllint roles
+    - yamllint .
   except: ['triggers', 'master']
 
 tox-inventory-builder:
diff --git a/_config.yml b/_config.yml
index c7418817439b2f071c93a4a6cee831e996123c0b..a2b6bf07ebd7f7eb768d3be1f9746b34d1c32af4 100644
--- a/_config.yml
+++ b/_config.yml
@@ -1 +1,2 @@
+---
-theme: jekyll-theme-slate
\ No newline at end of file
+theme: jekyll-theme-slate
diff --git a/contrib/azurerm/roles/generate-templates/defaults/main.yml b/contrib/azurerm/roles/generate-templates/defaults/main.yml
index 8f5ab94a1a16fad6b4423465b52991f88c6237b0..9c34c2c8ec125ea9b84c12de2f990701febca7e4 100644
--- a/contrib/azurerm/roles/generate-templates/defaults/main.yml
+++ b/contrib/azurerm/roles/generate-templates/defaults/main.yml
@@ -1,3 +1,4 @@
+---
 apiVersion: "2015-06-15"
 
 virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}"
@@ -34,4 +35,3 @@ imageReferenceJson: "{{imageReference|to_json}}"
 
 storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
 storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
-
diff --git a/contrib/azurerm/roles/generate-templates/tasks/main.yml b/contrib/azurerm/roles/generate-templates/tasks/main.yml
index 8b0789987538be72aa943da2c346ea4e5aa4332f..4ee6d858c4d0fd121380fb941b46d2d1733bb7ac 100644
--- a/contrib/azurerm/roles/generate-templates/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-templates/tasks/main.yml
@@ -1,3 +1,4 @@
+---
 - set_fact:
     base_dir: "{{playbook_dir}}/.generated/"
 
diff --git a/contrib/dind/group_vars/all/all.yaml b/contrib/dind/group_vars/all/all.yaml
index 6e6223898e625fa3c16d897e7f7412d2a02bb23e..fd619a05e9f41c0581229677f47b810de7e3cfec 100644
--- a/contrib/dind/group_vars/all/all.yaml
+++ b/contrib/dind/group_vars/all/all.yaml
@@ -1,2 +1,3 @@
+---
 # See distro.yaml for supported node_distro images
 node_distro: debian
diff --git a/contrib/dind/group_vars/all/distro.yaml b/contrib/dind/group_vars/all/distro.yaml
index 354d962978d1492bccef46ee9917343d50c20ead..b9c2670e9c5ce55aac4e89cbe66645017243eacb 100644
--- a/contrib/dind/group_vars/all/distro.yaml
+++ b/contrib/dind/group_vars/all/distro.yaml
@@ -1,3 +1,4 @@
+---
 distro_settings:
   debian: &DEBIAN
     image: "debian:9.5"
diff --git a/contrib/dind/kubespray-dind.yaml b/contrib/dind/kubespray-dind.yaml
index 6386bcf31c885999feb5b6632bde838d13cc7ad3..ecfb5573a7032bf010429d65463c59f8e35bb738 100644
--- a/contrib/dind/kubespray-dind.yaml
+++ b/contrib/dind/kubespray-dind.yaml
@@ -1,3 +1,4 @@
+---
 # kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
 # See contrib/dind/README.md
 kube_api_anonymous_auth: true
diff --git a/contrib/dind/roles/dind-cluster/tasks/main.yaml b/contrib/dind/roles/dind-cluster/tasks/main.yaml
index 60e330ac28b04990d3a817be69808c147fea69e8..affc99ea1818d94487e1092322243a9b5d62bec1 100644
--- a/contrib/dind/roles/dind-cluster/tasks/main.yaml
+++ b/contrib/dind/roles/dind-cluster/tasks/main.yaml
@@ -1,3 +1,4 @@
+---
 - name: set_fact distro_setup
   set_fact:
     distro_setup: "{{ distro_settings[node_distro] }}"
@@ -33,7 +34,7 @@
       # Delete docs
       path-exclude=/usr/share/doc/*
       path-include=/usr/share/doc/*/copyright
-    dest:  /etc/dpkg/dpkg.cfg.d/01_nodoc
+    dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
   when:
     - ansible_os_family == 'Debian'
 
@@ -55,7 +56,7 @@
   user:
     name: "{{ distro_user }}"
     uid: 1000
-    #groups: sudo
+    # groups: sudo
     append: yes
 
 - name: Allow password-less sudo to "{{ distro_user }}"
diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml
index ee7d1a92f9e5f43442af058b857f73f3949c18d3..4212b3ed51f84f1f6f9ac464d4fc9328f7f76606 100644
--- a/contrib/dind/roles/dind-host/tasks/main.yaml
+++ b/contrib/dind/roles/dind-host/tasks/main.yaml
@@ -1,3 +1,4 @@
+---
 - name: set_fact distro_setup
   set_fact:
     distro_setup: "{{ distro_settings[node_distro] }}"
@@ -18,7 +19,7 @@
     state: started
     hostname: "{{ item }}"
     command: "{{ distro_init }}"
-    #recreate: yes
+    # recreate: yes
     privileged: true
     tmpfs:
       - /sys/module/nf_conntrack/parameters
diff --git a/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml b/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml
index 24cb16284b381882d29d70552ed0a4c4bc6a44eb..e1df16ad2a2ad6a2381e20f40ade93b2288fa36a 100644
--- a/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml
+++ b/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml
@@ -2,8 +2,8 @@
 
 - name: Upgrade all packages to the latest version (yum)
   yum:
-   name: '*'
-   state: latest
+    name: '*'
+    state: latest
   when: ansible_os_family == "RedHat"
 
 - name: Install required packages
diff --git a/contrib/network-storage/glusterfs/glusterfs.yml b/contrib/network-storage/glusterfs/glusterfs.yml
index 2e6ad7195b13d9892f9bf04640a6429dd7db0eef..e5b6f1301be1b4f03dcfb9455de8193628b8ac3e 100644
--- a/contrib/network-storage/glusterfs/glusterfs.yml
+++ b/contrib/network-storage/glusterfs/glusterfs.yml
@@ -4,7 +4,7 @@
   vars:
     ansible_ssh_pipelining: false
   roles:
-   - { role: bootstrap-os, tags: bootstrap-os}
+    - { role: bootstrap-os, tags: bootstrap-os}
 
 - hosts: all
   gather_facts: true
@@ -22,4 +22,3 @@
 - hosts: kube-master[0]
   roles:
     - { role: kubernetes-pv }
-
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/meta/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/meta/main.yml
index c53f1715971f75c1fb9035407070a6605487e3c0..8d3513f029805ea26029534adec202114a1426b2 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/client/meta/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/meta/main.yml
@@ -22,9 +22,9 @@ galaxy_info:
     - wheezy
     - jessie
   galaxy_tags:
-    - system
-    - networking
-    - cloud
-    - clustering
-    - files
-    - sharing
+  - system
+  - networking
+  - cloud
+  - clustering
+  - files
+  - sharing
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml
index dc151284d933dc96a4b5e82b65f4aea8fe669c9a..e6c3dacb077b998df281885ff2703061412544a2 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/main.yml
@@ -12,5 +12,5 @@
 - name: Ensure Gluster mount directories exist.
   file: "path={{ item }} state=directory mode=0775"
   with_items:
-     - "{{ gluster_mount_dir }}"
+    - "{{ gluster_mount_dir }}"
   when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/meta/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/meta/main.yml
index c53f1715971f75c1fb9035407070a6605487e3c0..8d3513f029805ea26029534adec202114a1426b2 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/meta/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/meta/main.yml
@@ -22,9 +22,9 @@ galaxy_info:
     - wheezy
     - jessie
   galaxy_tags:
-    - system
-    - networking
-    - cloud
-    - clustering
-    - files
-    - sharing
+  - system
+  - networking
+  - cloud
+  - clustering
+  - files
+  - sharing
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
index 1ac2bb1744fcd082df1237119ca47d551c54521d..76116257c2ea40a143066f7ea53d52aab7d0bf5d 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
@@ -33,24 +33,24 @@
 - name: Ensure Gluster brick and mount directories exist.
   file: "path={{ item }} state=directory mode=0775"
   with_items:
-     - "{{ gluster_brick_dir }}"
-     - "{{ gluster_mount_dir }}"
+    - "{{ gluster_brick_dir }}"
+    - "{{ gluster_mount_dir }}"
 
 - name: Configure Gluster volume.
   gluster_volume:
-        state: present
-        name: "{{ gluster_brick_name }}"
-        brick: "{{ gluster_brick_dir }}"
-        replicas: "{{ groups['gfs-cluster'] | length }}"
-        cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
-        host: "{{ inventory_hostname }}"
-        force: yes
+    state: present
+    name: "{{ gluster_brick_name }}"
+    brick: "{{ gluster_brick_dir }}"
+    replicas: "{{ groups['gfs-cluster'] | length }}"
+    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
+    host: "{{ inventory_hostname }}"
+    force: yes
   run_once: true
 
 - name: Mount glusterfs to retrieve disk size
   mount:
     name: "{{ gluster_mount_dir }}"
-    src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster" 
+    src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
     fstype: glusterfs
     opts: "defaults,_netdev"
     state: mounted
@@ -63,13 +63,13 @@
 
 - name: Set Gluster disk size to variable
   set_fact:
-     gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
+    gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
 
 - name: Create file on GlusterFS
   template:
-      dest: "{{ gluster_mount_dir }}/.test-file.txt"
-      src: test-file.txt
+    dest: "{{ gluster_mount_dir }}/.test-file.txt"
+    src: test-file.txt
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
 
 - name: Unmount glusterfs
@@ -79,4 +79,3 @@
     src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
     state: unmounted
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
-
diff --git a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
index 76a11c2279564dd78e1bcc79d9dd0a095d3f97cc..2e108701aaf5fa30a83e2f9cb6837794bbc8a9f7 100644
--- a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
+++ b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
@@ -2,9 +2,9 @@
 - name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
   template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}}
   with_items:
-          - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
-          - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
-          - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
+    - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
+    - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
+    - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
   register: gluster_pv
   when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
 
diff --git a/contrib/network-storage/glusterfs/roles/kubernetes-pv/meta/main.yaml b/contrib/network-storage/glusterfs/roles/kubernetes-pv/meta/main.yaml
index 2f22776f3dd912f2000dde53f2c094851e69d16b..a4ab33f5bbf2170452f62948f538a0cb8edbf484 100644
--- a/contrib/network-storage/glusterfs/roles/kubernetes-pv/meta/main.yaml
+++ b/contrib/network-storage/glusterfs/roles/kubernetes-pv/meta/main.yaml
@@ -1,2 +1,3 @@
+---
 dependencies:
   - {role: kubernetes-pv/ansible, tags: apps}
diff --git a/contrib/network-storage/heketi/roles/prepare/tasks/main.yml b/contrib/network-storage/heketi/roles/prepare/tasks/main.yml
index e4db23365bf79e7493b4daa8ecfd364056919f00..dd5a69350bf64d70c5c273497e8915951d7ccd01 100644
--- a/contrib/network-storage/heketi/roles/prepare/tasks/main.yml
+++ b/contrib/network-storage/heketi/roles/prepare/tasks/main.yml
@@ -2,23 +2,23 @@
 - name: "Load lvm kernel modules"
   become: true
   with_items:
-      - "dm_snapshot"
-      - "dm_mirror"
-      - "dm_thin_pool"
+    - "dm_snapshot"
+    - "dm_mirror"
+    - "dm_thin_pool"
   modprobe:
-      name: "{{ item }}"
-      state: "present"
+    name: "{{ item }}"
+    state: "present"
 
 - name: "Install glusterfs mount utils (RedHat)"
   become: true
   yum:
-      name: "glusterfs-fuse"
-      state: "present"
+    name: "glusterfs-fuse"
+    state: "present"
   when: "ansible_os_family == 'RedHat'"
 
 - name: "Install glusterfs mount utils (Debian)"
   become: true
   apt:
-      name: "glusterfs-client"
-      state: "present"
+    name: "glusterfs-client"
+    state: "present"
   when: "ansible_os_family == 'Debian'"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
index 572913a63e0df630c42d1b0966d27d21da9090cf..0368f4e7b0af9af5dcd1f94943ce5d053ff66e9b 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
@@ -1,3 +1,4 @@
+---
 # Bootstrap heketi
 - name: "Get state of heketi service, deployment and pods."
   register: "initial_heketi_state"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
index 3037d8b77ca10ea9041a66faf6c3366cebb29998..ac5115a00fadef5eecbe0e33b0bdd8f1c15c83e6 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
@@ -18,7 +18,7 @@
     deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
   command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
   until:
-      - "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
-      - "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
+    - "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
+    - "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
   retries: 60
   delay: 5
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml
index d5da1a12588602b4818a822e1644ef357b2f0567..14ab97793991d5c7f2d39d693b89dde66a9636ea 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml
@@ -38,4 +38,4 @@
   vars: { volume: "{{ volume_information.stdout|from_json }}" }
   when: "volume.name == 'heketidbstorage'"
 - name: "Ensure heketi database volume exists."
-  assert: { that: "heketi_database_volume_created is defined" , msg: "Heketi database volume does not exist." }
+  assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
index 029baef947bb25e34855f80c8bf9d3e37b480374..2052abefcaf5b0f31dc98c453e3e4d2832ed96ed 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
@@ -18,8 +18,8 @@
     deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
   command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
   until:
-      - "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
-      - "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
+    - "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
+    - "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
   retries: 60
   delay: 5
 - set_fact:
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
index afd818eb31bf93a6e852ead15e5f89df307319ae..f878876bc11c5d953879e71b475a843340ce309c 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
@@ -8,7 +8,7 @@
   register: "heketi_service"
   changed_when: false
 - name: "Ensure heketi service is available."
-  assert: { that: "heketi_service.stdout != \"\""  }
+  assert: { that: "heketi_service.stdout != \"\"" }
 - name: "Render storage class configuration."
   become: true
   vars:
diff --git a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
index 01e03660c4def2564a782c4ebb1e5fc38b18b02e..c1816969413bc52295a20257e9d4d136f670f3e1 100644
--- a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
+++ b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
@@ -2,15 +2,15 @@
 - name: "Install lvm utils (RedHat)"
   become: true
   yum:
-      name: "lvm2"
-      state: "present"
+    name: "lvm2"
+    state: "present"
   when: "ansible_os_family == 'RedHat'"
 
 - name: "Install lvm utils (Debian)"
   become: true
   apt:
-      name: "lvm2"
-      state: "present"
+    name: "lvm2"
+    state: "present"
   when: "ansible_os_family == 'Debian'"
 
 - name: "Get volume group information."
@@ -34,13 +34,13 @@
 - name: "Remove lvm utils (RedHat)"
   become: true
   yum:
-      name: "lvm2"
-      state: "absent"
+    name: "lvm2"
+    state: "absent"
   when: "ansible_os_family == 'RedHat'"
 
 - name: "Remove lvm utils (Debian)"
   become: true
   apt:
-      name: "lvm2"
-      state: "absent"
+    name: "lvm2"
+    state: "absent"
   when: "ansible_os_family == 'Debian'"
diff --git a/contrib/vault/groups_vars/vault.yaml b/contrib/vault/groups_vars/vault.yaml
index c59c123b2c80eed2207cd59a9747816cbbd77d4b..420e75081035cb6d4aa4a24b994609703dca5f5b 100644
--- a/contrib/vault/groups_vars/vault.yaml
+++ b/contrib/vault/groups_vars/vault.yaml
@@ -1,3 +1,4 @@
+---
 vault_deployment_type: docker
 vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
 vault_version: 0.10.1
diff --git a/docs/calico_peer_example/new-york.yml b/docs/calico_peer_example/new-york.yml
index ce9d953963b9f6ee28ab21164ae582b6c3979232..dd4d8125b7bf6cb1f0da9bff8e6fbefb7f5c472d 100644
--- a/docs/calico_peer_example/new-york.yml
+++ b/docs/calico_peer_example/new-york.yml
@@ -1,10 +1,10 @@
-#---
-#peers:
-#  -router_id: "10.99.0.34"
-#   as: "65xxx"
-#  - router_id: "10.99.0.35"
-#   as: "65xxx"
-#
-#loadbalancer_apiserver:
-#  address: "10.99.0.44"
-#  port: "8383"
+# ---
+# peers:
+#   - router_id: "10.99.0.34"
+#     as: "65xxx"
+#   - router_id: "10.99.0.35"
+#     as: "65xxx"
+
+# loadbalancer_apiserver:
+#   address: "10.99.0.44"
+#   port: "8383"
diff --git a/docs/calico_peer_example/paris.yml b/docs/calico_peer_example/paris.yml
index e8b34ae0d24bf78101a596a8990089b9d80dfcb0..6d4ab417971e36af77c499ce5e93f0f94df4e45b 100644
--- a/docs/calico_peer_example/paris.yml
+++ b/docs/calico_peer_example/paris.yml
@@ -1,10 +1,10 @@
-#---
-#peers:
-#  -router_id: "10.99.0.2"
-#   as: "65xxx"
-#  - router_id: "10.99.0.3"
-#   as: "65xxx"
-#
-#loadbalancer_apiserver:
-#  address: "10.99.0.21"
-#  port: "8383"
+# ---
+# peers:
+#   - router_id: "10.99.0.2"
+#     as: "65xxx"
+#   - router_id: "10.99.0.3"
+#     as: "65xxx"
+
+# loadbalancer_apiserver:
+#   address: "10.99.0.21"
+#   port: "8383"
diff --git a/extra_playbooks/build-cephfs-provisioner.yml b/extra_playbooks/build-cephfs-provisioner.yml
index c064bda3950400734b363b197691812af53bfb5c..5bffa137e7339cff1a697722e8a7b148d4a8477d 100644
--- a/extra_playbooks/build-cephfs-provisioner.yml
+++ b/extra_playbooks/build-cephfs-provisioner.yml
@@ -40,7 +40,7 @@
             version: 06fddbe2
             clone: yes
             update: yes
-            
+
         - name: CephFS Provisioner | Build image
           shell: |
             cd ~/go/src/github.com/kubernetes-incubator/external-storage
diff --git a/extra_playbooks/upgrade-only-k8s.yml b/extra_playbooks/upgrade-only-k8s.yml
index 9cae3e85b48c4ed19db59e0293ce746c54516850..ce3927f09a6c6f1e680f701491fc759604346027 100644
--- a/extra_playbooks/upgrade-only-k8s.yml
+++ b/extra_playbooks/upgrade-only-k8s.yml
@@ -1,3 +1,4 @@
+---
 ### NOTE: This playbook cannot be used to deploy any new nodes to the cluster.
 ### Additional information:
 ### * Will not upgrade etcd
@@ -38,8 +39,8 @@
     - { role: kubespray-defaults}
     - { role: kubernetes/preinstall, tags: preinstall }
 
-#Handle upgrades to master components first to maintain backwards compat.
-- hosts: kube-master
+- name: Handle upgrades to master components first to maintain backwards compat.
+  hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: 1
   roles:
@@ -51,8 +52,8 @@
     - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
 
-#Finally handle worker upgrades, based on given batch size
-- hosts: kube-node:!kube-master
+- name: Finally handle worker upgrades, based on given batch size
+  hosts: kube-node:!kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles:
diff --git a/inventory/sample/group_vars/all/all.yml b/inventory/sample/group_vars/all/all.yml
index bcb34be4cbb712e5200414c38b0b8a1297d27ba3..9932cf32cd1d52d76e2659553d4f51976d4ce1b4 100644
--- a/inventory/sample/group_vars/all/all.yml
+++ b/inventory/sample/group_vars/all/all.yml
@@ -1,3 +1,4 @@
+---
 ## Directory where etcd data stored
 etcd_data_dir: /var/lib/etcd
 
@@ -9,17 +10,17 @@ bin_dir: /usr/local/bin
 ## this node for example.  The access_ip is really useful AWS and Google
 ## environments where the nodes are accessed remotely by the "public" ip,
 ## but don't know about that address themselves.
-#access_ip: 1.1.1.1
+# access_ip: 1.1.1.1
 
 
 ## External LB example config
 ## apiserver_loadbalancer_domain_name: "elb.some.domain"
-#loadbalancer_apiserver:
-#  address: 1.2.3.4
-#  port: 1234
+# loadbalancer_apiserver:
+#   address: 1.2.3.4
+#   port: 1234
 
 ## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
+# loadbalancer_apiserver_localhost: true
 
 ## Local loadbalancer should use this port
 ## And must be set port 6443
@@ -32,12 +33,12 @@ nginx_kube_apiserver_healthcheck_port: 8081
 ## for mounting persistent volumes into containers.  These may not be loaded by preinstall kubernetes
 ## processes.  For example, ceph and rbd backed volumes.  Set to true to allow kubelet to load kernel
 ## modules.
-#kubelet_load_modules: false
+# kubelet_load_modules: false
 
 ## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-#  - 8.8.8.8
-#  - 8.8.4.4
+# upstream_dns_servers:
+#   - 8.8.8.8
+#   - 8.8.4.4
 
 ## There are some changes specific to the cloud providers
 ## for instance we need to encapsulate packets with some network plugins
@@ -46,43 +47,43 @@ nginx_kube_apiserver_healthcheck_port: 8081
 ## like you would do when using openstack-client before starting the playbook.
 ## Note: The 'external' cloud provider is not supported.
 ## TODO(riverzhang): https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager
-#cloud_provider:
+# cloud_provider:
 
 ## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
+# http_proxy: ""
+# https_proxy: ""
 
 ## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
+# no_proxy: ""
 
 ## Some problems may occur when downloading files over https proxy due to ansible bug
 ## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
 ## SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
-#download_validate_certs: False
+# download_validate_certs: False
 
 ## If you need exclude all cluster nodes from proxy and other resources, add other resources here.
-#additional_no_proxy: ""
+# additional_no_proxy: ""
 
 ## Certificate Management
 ## This setting determines whether certs are generated via scripts.
 ## Chose 'none' if you provide your own certificates.
 ## Option is  "script", "none"
 ## note: vault is removed
-#cert_management: script
+# cert_management: script
 
 ## Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
+# ignore_assert_errors: false
 
 ## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
-#kube_read_only_port: 10255
+# kube_read_only_port: 10255
 
 ## Set true to download and cache container
-#download_container: true
+# download_container: true
 
 ## Deploy container engine
 # Set false if you want to deploy container engine manually.
-#deploy_container_engine: true
+# deploy_container_engine: true
 
 ## Set Pypi repo and cert accordingly
-#pyrepo_index: https://pypi.example.com/simple
-#pyrepo_cert: /etc/ssl/certs/ca-certificates.crt
+# pyrepo_index: https://pypi.example.com/simple
+# pyrepo_cert: /etc/ssl/certs/ca-certificates.crt
diff --git a/inventory/sample/group_vars/all/azure.yml b/inventory/sample/group_vars/all/azure.yml
index 78d49c9b4d4857ed09c8e3b23bcc58d7337fcc7f..d6ddd6de7e47a9b217c96e3840f6f1958bf596bb 100644
--- a/inventory/sample/group_vars/all/azure.yml
+++ b/inventory/sample/group_vars/all/azure.yml
@@ -1,14 +1,14 @@
 ## When azure is used, you need to also set the following variables.
 ## see docs/azure.md for details on how to get these values
 
-#azure_tenant_id:
-#azure_subscription_id:
-#azure_aad_client_id:
-#azure_aad_client_secret:
-#azure_resource_group:
-#azure_location:
-#azure_subnet_name:
-#azure_security_group_name:
-#azure_vnet_name:
-#azure_vnet_resource_group:
-#azure_route_table_name:
+# azure_tenant_id:
+# azure_subscription_id:
+# azure_aad_client_id:
+# azure_aad_client_secret:
+# azure_resource_group:
+# azure_location:
+# azure_subnet_name:
+# azure_security_group_name:
+# azure_vnet_name:
+# azure_vnet_resource_group:
+# azure_route_table_name:
diff --git a/inventory/sample/group_vars/all/coreos.yml b/inventory/sample/group_vars/all/coreos.yml
index a48f24ebbc3639f5d906e45393adb1bf170a655b..22c21666304260912df1c5700359ce8082989e72 100644
--- a/inventory/sample/group_vars/all/coreos.yml
+++ b/inventory/sample/group_vars/all/coreos.yml
@@ -1,2 +1,2 @@
 ## Does coreos need auto upgrade, default is true
-#coreos_auto_upgrade: true
+# coreos_auto_upgrade: true
diff --git a/inventory/sample/group_vars/all/docker.yml b/inventory/sample/group_vars/all/docker.yml
index b7e93c14d8d8fdad89b4a7c65409ffd527d13657..f81e6b08dca93892bff9ea920f78bd7da3b63347 100644
--- a/inventory/sample/group_vars/all/docker.yml
+++ b/inventory/sample/group_vars/all/docker.yml
@@ -1,13 +1,14 @@
+---
 ## Uncomment this if you want to force overlay/overlay2 as docker storage driver
 ## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
+# docker_storage_options: -s overlay2
 
 ## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7.
 docker_container_storage_setup: false
 
 ## It must be define a disk path for docker_container_storage_setup_devs.
 ## Otherwise docker-storage-setup will be executed incorrectly.
-#docker_container_storage_setup_devs: /dev/vdb
+# docker_container_storage_setup_devs: /dev/vdb
 
 ## Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
 docker_dns_servers_strict: false
@@ -32,12 +33,12 @@ docker_rpm_keepcache: 0
 ## An obvious use case is allowing insecure-registry access to self hosted registries.
 ## Can be ipaddress and domain_name.
 ## example define 172.19.16.11 or mirror.registry.io
-#docker_insecure_registries:
+# docker_insecure_registries:
 #   - mirror.registry.io
 #   - 172.19.16.11
 
 ## Add other registry,example China registry mirror.
-#docker_registry_mirrors:
+# docker_registry_mirrors:
 #   - https://registry.docker-cn.com
 #   - https://mirror.aliyuncs.com
 
@@ -46,7 +47,7 @@ docker_rpm_keepcache: 0
 ## or private, which control whether mounts in the file system
 ## namespace set up for docker will receive or propagate mounts
 ## and unmounts. Leave empty for system default
-#docker_mount_flags:
+# docker_mount_flags:
 
 ## A string of extra options to pass to the docker daemon.
 ## This string should be exactly as you wish it to appear.
diff --git a/inventory/sample/group_vars/all/oci.yml b/inventory/sample/group_vars/all/oci.yml
index ee61fbb5755f2882c69358cc5a219c8b9096fcbd..00d9bef6ed78ca7b925985500aae7dfd85d9c758 100644
--- a/inventory/sample/group_vars/all/oci.yml
+++ b/inventory/sample/group_vars/all/oci.yml
@@ -1,28 +1,28 @@
 ## When Oracle Cloud Infrastructure is used, set these variables
-#oci_private_key:
-#oci_region_id:
-#oci_tenancy_id:
-#oci_user_id:
-#oci_user_fingerprint:
-#oci_compartment_id:
-#oci_vnc_id:
-#oci_subnet1_id:
-#oci_subnet2_id:
+# oci_private_key:
+# oci_region_id:
+# oci_tenancy_id:
+# oci_user_id:
+# oci_user_fingerprint:
+# oci_compartment_id:
+# oci_vnc_id:
+# oci_subnet1_id:
+# oci_subnet2_id:
 ## Overide these default/optional behaviors if you wish
-#oci_security_list_management: All
-# If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. 
-#oci_security_lists:
-  #ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
-  #ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
-# If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
-#oci_use_instance_principals: false
-#oci_cloud_controller_version: 0.6.0
-# If you would like to control OCI query rate limits for the controller 
-#oci_rate_limit:
-  #rate_limit_qps_read:
-  #rate_limit_qps_write:
-  #rate_limit_bucket_read:
-  #rate_limit_bucket_write:
-# Other optional variables
-#oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci)
-#oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above)
+# oci_security_list_management: All
+## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples.
+# oci_security_lists:
+#   ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
+#   ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
+## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
+# oci_use_instance_principals: false
+# oci_cloud_controller_version: 0.6.0
+## If you would like to control OCI query rate limits for the controller
+# oci_rate_limit:
+#   rate_limit_qps_read:
+#   rate_limit_qps_write:
+#   rate_limit_bucket_read:
+#   rate_limit_bucket_write:
+## Other optional variables
+# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci)
+# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above)
diff --git a/inventory/sample/group_vars/all/openstack.yml b/inventory/sample/group_vars/all/openstack.yml
index ae5dae2cf289dbf8586dd0a27ac8152a500cda53..790d4e687e08985f6d3c9e554074431a8e994547 100644
--- a/inventory/sample/group_vars/all/openstack.yml
+++ b/inventory/sample/group_vars/all/openstack.yml
@@ -1,16 +1,16 @@
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-#openstack_blockstorage_ignore_volume_az: yes
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-## To enable automatic floating ip provisioning, specify a subnet.
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-## Override default LBaaS behavior
-#openstack_lbaas_use_octavia: False
-#openstack_lbaas_method: "ROUND_ROBIN"
-#openstack_lbaas_provider: "haproxy"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
+## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
+# openstack_blockstorage_version: "v1/v2/auto (default)"
+# openstack_blockstorage_ignore_volume_az: yes
+## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
+# openstack_lbaas_enabled: True
+# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
+## To enable automatic floating ip provisioning, specify a subnet.
+# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
+## Override default LBaaS behavior
+# openstack_lbaas_use_octavia: False
+# openstack_lbaas_method: "ROUND_ROBIN"
+# openstack_lbaas_provider: "haproxy"
+# openstack_lbaas_create_monitor: "yes"
+# openstack_lbaas_monitor_delay: "1m"
+# openstack_lbaas_monitor_timeout: "30s"
+# openstack_lbaas_monitor_max_retries: "3"
diff --git a/inventory/sample/group_vars/etcd.yml b/inventory/sample/group_vars/etcd.yml
index 6f5347cb956c08b07f9aca0b312fa817a1a97f98..d2335c66eb6f648e94473a8498ab8f7d50b9d85c 100644
--- a/inventory/sample/group_vars/etcd.yml
+++ b/inventory/sample/group_vars/etcd.yml
@@ -1,18 +1,18 @@
 ## Etcd auto compaction retention for mvcc key value store in hour
-#etcd_compaction_retention: 0
+# etcd_compaction_retention: 0
 
 ## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
+# etcd_metrics: basic
 
 ## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing.
 ## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
-#etcd_memory_limit: "512M"
+# etcd_memory_limit: "512M"
 
 ## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
 ## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
 ## etcd documentation for more information.
-#etcd_quota_backend_bytes: "2G"
+# etcd_quota_backend_bytes: "2G"
 
 ### ETCD: disable peer client cert authentication.
 # This affects ETCD_PEER_CLIENT_CERT_AUTH variable
-#etcd_peer_client_auth: true
+# etcd_peer_client_auth: true
diff --git a/inventory/sample/group_vars/k8s-cluster/addons.yml b/inventory/sample/group_vars/k8s-cluster/addons.yml
index ee6bdfc3ace5833f87819b721bba8c3f98ab78d5..7f83e3876a80b0b891ec727b1e4fef9b07b054d6 100644
--- a/inventory/sample/group_vars/k8s-cluster/addons.yml
+++ b/inventory/sample/group_vars/k8s-cluster/addons.yml
@@ -1,3 +1,4 @@
+---
 # Kubernetes dashboard
 # RBAC required. see docs/getting-started.md for access details.
 dashboard_enabled: true
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
index 3cac5fca022b7e9d326f704a10c49c2ceafc0215..5eaf6d7de9ad4a9f1f51ad24a005b1b25c61462a 100644
--- a/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
@@ -1,3 +1,4 @@
+---
 # Kubernetes configuration dirs and system namespace.
 # Those are where all the additional config stuff goes
 # the kubernetes normally puts in /srv/kubernetes.
@@ -51,9 +52,9 @@ kube_users:
       - system:masters
 
 ## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
-#kube_oidc_auth: false
-#kube_basic_auth: false
-#kube_token_auth: false
+# kube_oidc_auth: false
+# kube_basic_auth: false
+# kube_token_auth: false
 
 
 ## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
@@ -91,10 +92,10 @@ kube_network_node_prefix: 24
 
 # The port the API Server will be listening on.
 kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-#kube_apiserver_insecure_port: 8080 # (http)
+kube_apiserver_port: 6443  # (https)
+# kube_apiserver_insecure_port: 8080  # (http)
 # Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
-kube_apiserver_insecure_port: 0 # (disabled)
+kube_apiserver_insecure_port: 0  # (disabled)
 
 # Kube-proxy proxyMode configuration.
 # Can be ipvs, iptables
@@ -112,11 +113,11 @@ kube_proxy_nodeport_addresses: >-
   {%- endif -%}
 
 # If non-empty, will use this string as identification instead of the actual hostname
-#kube_override_hostname: >-
-#  {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
-#  {%- else -%}
-#  {{ inventory_hostname }}
-#  {%- endif -%}
+# kube_override_hostname: >-
+#   {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
+#   {%- else -%}
+#   {{ inventory_hostname }}
+#   {%- endif -%}
 
 ## Encrypting Secret Data at Rest (experimental)
 kube_encrypt_secret_data: false
@@ -129,7 +130,7 @@ ndots: 2
 # Can be dnsmasq_kubedns, kubedns, coredns, coredns_dual, manual or none
 dns_mode: coredns
 # Set manual server if using a custom cluster DNS server
-#manual_dns_server: 10.x.x.x
+# manual_dns_server: 10.x.x.x
 # Enable nodelocal dns cache
 enable_nodelocaldns: False
 nodelocaldns_ip: 169.254.25.10
@@ -163,7 +164,7 @@ kubernetes_audit: false
 dynamic_kubelet_configuration: false
 
 # define kubelet config dir for dynamic kubelet
-#kubelet_config_dir:
+# kubelet_config_dir:
 default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
 dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"
 
@@ -177,8 +178,8 @@ podsecuritypolicy_enabled: false
 
 # dnsmasq
 # dnsmasq_upstream_dns_servers:
-#  - /resolvethiszone.with/10.0.4.250
-#  - 8.8.8.8
+#   - /resolvethiszone.with/10.0.4.250
+#   - 8.8.8.8
 
 #  Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
 # kubelet_cgroups_per_qos: true
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml b/inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml
index 6ca30a24448ab97eb20cf970ae417c8eff0684be..9d2654c762a5f83f08c192a7a6cf4615f55bc6c3 100644
--- a/inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml
+++ b/inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml
@@ -3,7 +3,7 @@
 ## With calico it is possible to distributed routes with border routers of the datacenter.
 ## Warning : enabling router peering will disable calico's default behavior ('node mesh').
 ## The subnets of each nodes will be distributed by the datacenter router
-#peer_with_router: false
+# peer_with_router: false
 
 # Enables Internet connectivity from containers
 # nat_outgoing: true
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-canal.yml b/inventory/sample/group_vars/k8s-cluster/k8s-net-canal.yml
index 5c78b5d70d149dce4e72088b919e0204160835ad..24deb99d10db593d461f0332e22cb05c69227ac9 100644
--- a/inventory/sample/group_vars/k8s-cluster/k8s-net-canal.yml
+++ b/inventory/sample/group_vars/k8s-cluster/k8s-net-canal.yml
@@ -8,4 +8,3 @@
 # Whether or not to masquerade traffic to destinations not within
 # the pod network.
 # canal_masquerade: "true"
-
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-contiv.yml b/inventory/sample/group_vars/k8s-cluster/k8s-net-contiv.yml
index af27368ecadf965f698c01d50da6cbb61a32bec6..1ec51cb421366843414073a7f227f71471d53de2 100644
--- a/inventory/sample/group_vars/k8s-cluster/k8s-net-contiv.yml
+++ b/inventory/sample/group_vars/k8s-cluster/k8s-net-contiv.yml
@@ -6,9 +6,9 @@
 ## With contiv, L3 BGP mode is possible by setting contiv_fwd_mode to "routing".
 ## In this case, you may need to peer with an uplink
 ## NB: The hostvars must contain a key "contiv" of which value is a dict containing "router_ip", "as"(defaults to contiv_global_as), "neighbor_as" (defaults to contiv_global_neighbor_as), "neighbor"
-#contiv_peer_with_uplink_leaf: false
-#contiv_global_as: "65002"
-#contiv_global_neighbor_as: "500"
+# contiv_peer_with_uplink_leaf: false
+# contiv_global_as: "65002"
+# contiv_global_neighbor_as: "500"
 
 # Fabric mode: aci, aci-opflex or default
 # contiv_fabric_mode: default
diff --git a/mitogen.yaml b/mitogen.yaml
index 5cae114ff78bb70c4e2e4959c1d652334b49d0f0..fa9d4ec54dc2efe066942505f9598ca55ac081c2 100644
--- a/mitogen.yaml
+++ b/mitogen.yaml
@@ -1,3 +1,4 @@
+---
 - hosts: localhost
   strategy: linear
   vars:
diff --git a/scale.yml b/scale.yml
index 4dcbeda610d2a686f4056ebc50f0a63d9c2b9084..9809eb0680a0066dcbc41fd78f02cfff81c7009f 100644
--- a/scale.yml
+++ b/scale.yml
@@ -20,8 +20,8 @@
     - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
 
-##Bootstrap any new workers
-- hosts: kube-node
+- name: Bootstrap any new workers
+  hosts: kube-node
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   gather_facts: false
   vars:
@@ -30,22 +30,22 @@
     - { role: kubespray-defaults}
     - { role: bootstrap-os, tags: bootstrap-os}
 
-##We still have to gather facts about our masters and etcd nodes
-- hosts: k8s-cluster:etcd:calico-rr
+- name: Gather facts about our masters and etcd nodes
+  hosts: k8s-cluster:etcd:calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   vars:
     ansible_ssh_pipelining: true
   gather_facts: true
 
-##We need to generate the etcd certificates beforhand
-- hosts: etcd
+- name: Generate the etcd certificates beforehand
+  hosts: etcd
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-  - { role: kubespray-defaults}
-  - { role: etcd, tags: etcd, etcd_cluster_setup: false }
+    - { role: kubespray-defaults}
+    - { role: etcd, tags: etcd, etcd_cluster_setup: false }
 
-##Target only workers to get kubelet installed and checking in on any new nodes
-- hosts: kube-node
+- name: Target only workers to get kubelet installed and checking in on any new nodes
+  hosts: kube-node
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
diff --git a/test-infra/image-builder/cluster.yml b/test-infra/image-builder/cluster.yml
index 00e0d541e734f7788167be53a9fae2ce3369fd72..a25de7ff4cf8aae398a9ce67de6f70045566e0bc 100644
--- a/test-infra/image-builder/cluster.yml
+++ b/test-infra/image-builder/cluster.yml
@@ -1,4 +1,5 @@
- - hosts: image-builder
-   gather_facts: false
-   roles:
-     - kubevirt-images
+---
+- hosts: image-builder
+  gather_facts: false
+  roles:
+    - kubevirt-images
diff --git a/test-infra/image-builder/roles/kubevirt-images/defaults/main.yml b/test-infra/image-builder/roles/kubevirt-images/defaults/main.yml
index 717fa605c8759ebcb6b95e0b3d5925aacbc87910..ed3407a9335f6616a58472da9d58b1d63c6bc6f3 100644
--- a/test-infra/image-builder/roles/kubevirt-images/defaults/main.yml
+++ b/test-infra/image-builder/roles/kubevirt-images/defaults/main.yml
@@ -1,3 +1,4 @@
+---
 images_dir: /images/base
 
 docker_user: kubespray+buildvmimages
@@ -5,32 +6,32 @@ docker_host: quay.io
 registry: quay.io/kubespray
 
 images:
-    ubuntu-1604:
-      filename: xenial-server-cloudimg-amd64-disk1.img
-      url: https://storage.googleapis.com/kubespray-images/ubuntu/xenial-server-cloudimg-amd64-disk1.img
-      checksum: sha256:c0d099383cd064390b568e20d1c39a9c68ba864764404b70f754a7b1b2f808f7
-      converted: false
+  ubuntu-1604:
+    filename: xenial-server-cloudimg-amd64-disk1.img
+    url: https://storage.googleapis.com/kubespray-images/ubuntu/xenial-server-cloudimg-amd64-disk1.img
+    checksum: sha256:c0d099383cd064390b568e20d1c39a9c68ba864764404b70f754a7b1b2f808f7
+    converted: false
 
-    ubuntu-1804:
-      filename: bionic-server-cloudimg-amd64.img
-      url: https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img
-      checksum: sha256:c3d0e03f4245ffaabd7647e6dabf346b944a62b9934d0a89f3a04b4236386af2
-      converted: false
+  ubuntu-1804:
+    filename: bionic-server-cloudimg-amd64.img
+    url: https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img
+    checksum: sha256:c3d0e03f4245ffaabd7647e6dabf346b944a62b9934d0a89f3a04b4236386af2
+    converted: false
 
-    fedora-28:
-      filename: Fedora-Cloud-Base-28-1.1.x86_64.qcow2
-      url: https://mirror.netsite.dk/fedora/linux/releases/28/Cloud/x86_64/images/Fedora-Cloud-Base-28-1.1.x86_64.qcow2
-      checksum: sha256:d987209719fadaf81b8bff85c5d3590a1d3dac6357e4838fde8357086c49b5b4
-      converted: true
+  fedora-28:
+    filename: Fedora-Cloud-Base-28-1.1.x86_64.qcow2
+    url: https://mirror.netsite.dk/fedora/linux/releases/28/Cloud/x86_64/images/Fedora-Cloud-Base-28-1.1.x86_64.qcow2
+    checksum: sha256:d987209719fadaf81b8bff85c5d3590a1d3dac6357e4838fde8357086c49b5b4
+    converted: true
 
-    centos-7:
-      filename: CentOS-7-x86_64-GenericCloud-1809.qcow2
-      url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1809.qcow2
-      checksum: sha256:42c062df8a8c36991ec0282009dd52ac488461a3f7ee114fc21a765bfc2671c2
-      converted: true
+  centos-7:
+    filename: CentOS-7-x86_64-GenericCloud-1809.qcow2
+    url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1809.qcow2
+    checksum: sha256:42c062df8a8c36991ec0282009dd52ac488461a3f7ee114fc21a765bfc2671c2
+    converted: true
 
-    debian-9:
-      filename: debian-9-openstack-amd64.qcow2
-      url: https://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
-      checksum: sha256:01d9345ba7a6523d214d2eaabe07fe7b4b69b28e63d7a6b322521e99e5768719
-      converted: true
+  debian-9:
+    filename: debian-9-openstack-amd64.qcow2
+    url: https://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+    checksum: sha256:01d9345ba7a6523d214d2eaabe07fe7b4b69b28e63d7a6b322521e99e5768719
+    converted: true
diff --git a/tests/files/do_ubuntu-canal-ha.yml b/tests/files/do_ubuntu-canal-ha.yml
index ecd9ed428c75dd2ccd49e127e604de34c4d0b369..9152986986666954f6c49009b63b3a72673ea01d 100644
--- a/tests/files/do_ubuntu-canal-ha.yml
+++ b/tests/files/do_ubuntu-canal-ha.yml
@@ -1,3 +1,4 @@
+---
 cloud_image: ubuntu-16-04-x64
 cloud_region: nyc3
 mode: ha
diff --git a/tests/files/gce_centos-weave-kubeadm-sep.yml b/tests/files/gce_centos-weave-kubeadm-sep.yml
index 9e2586438ee71f79eacc4f917ec56d025c3187e5..84b604f1be346734bb48f18e239f431b74731084 100644
--- a/tests/files/gce_centos-weave-kubeadm-sep.yml
+++ b/tests/files/gce_centos-weave-kubeadm-sep.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: centos-7
 cloud_machine_type: "n1-standard-1"
diff --git a/tests/files/gce_centos7-calico-ha.yml b/tests/files/gce_centos7-calico-ha.yml
index d45d75b8ecd48d09eeec9032bb5920ea63f85ba9..58584c7c6d004d8d69800746dc453f376f480111 100644
--- a/tests/files/gce_centos7-calico-ha.yml
+++ b/tests/files/gce_centos7-calico-ha.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: centos-7
 cloud_region: us-central1-c
diff --git a/tests/files/gce_centos7-cilium.yml b/tests/files/gce_centos7-cilium.yml
index 85e13a1c37cb6641dea8aac2774558684eb5e5aa..f945ad5adeb1601825f8017b0764b6ba3ee71851 100644
--- a/tests/files/gce_centos7-cilium.yml
+++ b/tests/files/gce_centos7-cilium.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: centos-7
 cloud_region: us-central1-c
diff --git a/tests/files/gce_centos7-flannel-addons.yml b/tests/files/gce_centos7-flannel-addons.yml
index c073d3d67ff4a7ca25482115607dd289dc8ea1cb..5ff0f3045ff11cae686cb39973a5be12a4c5705b 100644
--- a/tests/files/gce_centos7-flannel-addons.yml
+++ b/tests/files/gce_centos7-flannel-addons.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: centos-7
 cloud_region: us-central1-c
@@ -15,7 +16,7 @@ deploy_netchecker: true
 dns_min_replicas: 1
 cloud_provider: gce
 kube_encrypt_secret_data: true
-#ingress_nginx_enabled: true
+# ingress_nginx_enabled: true
 cert_manager_enabled: true
 metrics_server_enabled: true
 kube_token_auth: true
diff --git a/tests/files/gce_centos7-kube-router.yml b/tests/files/gce_centos7-kube-router.yml
index a7349ad4c3b4133afa80c75a44e91ec0bd664fb7..1bdc49484df61f5ca0bad2ec43444997e32daa57 100644
--- a/tests/files/gce_centos7-kube-router.yml
+++ b/tests/files/gce_centos7-kube-router.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: centos-7
 cloud_region: us-central1-c
diff --git a/tests/files/gce_centos7-multus-calico.yml b/tests/files/gce_centos7-multus-calico.yml
index 146f9401bb127f7732c62e20d86daca8421de397..31c6fd5ee8a9f8e6c457d46ebeceb4fcefb90ba5 100644
--- a/tests/files/gce_centos7-multus-calico.yml
+++ b/tests/files/gce_centos7-multus-calico.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: centos-7
 cloud_region: us-central1-c
diff --git a/tests/files/gce_coreos-alpha-weave-ha.yml b/tests/files/gce_coreos-alpha-weave-ha.yml
index 7de3b43ec4414fae855ee04352dbdfdaad1e5e75..4c91b251e814bec7b42998d1b21d6c9c8ded5f58 100644
--- a/tests/files/gce_coreos-alpha-weave-ha.yml
+++ b/tests/files/gce_coreos-alpha-weave-ha.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: coreos-alpha
 cloud_region: us-central1-a
@@ -7,7 +8,7 @@ startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
 
 # Deployment settings
 kube_network_plugin: weave
-resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
+resolvconf_mode: host_resolvconf  # this is required as long as the coreos stable channel uses docker < 1.12
 deploy_netchecker: true
 dns_min_replicas: 1
 cloud_provider: gce
diff --git a/tests/files/gce_coreos-calico-aio.yml b/tests/files/gce_coreos-calico-aio.yml
index 51a7c686d30ebb78b84eb6776d119187308c1246..bb60e7afc2b973fd23ab72141489d41108fd6979 100644
--- a/tests/files/gce_coreos-calico-aio.yml
+++ b/tests/files/gce_coreos-calico-aio.yml
@@ -1,15 +1,16 @@
+---
 # Instance settings
 cloud_image_family: coreos-stable
 cloud_region: us-central1-a
 cloud_machine_type: "n1-standard-2"
 mode: aio
-##user-data to simply turn off coreos upgrades
+## user-data to simply turn off coreos upgrades
 startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
 
 # Deployment settings
 no_group_vars: true
 kube_network_plugin: calico
-resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
+resolvconf_mode: host_resolvconf  # this is required as long as the coreos stable channel uses docker < 1.12
 deploy_netchecker: true
 dns_min_replicas: 1
 cloud_provider: gce
diff --git a/tests/files/gce_coreos-canal.yml b/tests/files/gce_coreos-canal.yml
index ffeddc29c1b19320176896b26a043f4dd3c542dd..f18f2f36d267cc63447629476af5d2520552034b 100644
--- a/tests/files/gce_coreos-canal.yml
+++ b/tests/files/gce_coreos-canal.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: coreos-stable
 cloud_region: us-central1-c
@@ -6,7 +7,7 @@ startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
 
 # Deployment settings
 kube_network_plugin: canal
-resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
+resolvconf_mode: host_resolvconf  # this is required as long as the coreos stable channel uses docker < 1.12
 deploy_netchecker: true
 dns_min_replicas: 1
 cloud_provider: gce
diff --git a/tests/files/gce_coreos-cilium.yml b/tests/files/gce_coreos-cilium.yml
index 6cf7358a1e95e19428f3fd89c3f22c0aaecf939e..08848cea62f1dc73599d1fdec2b211d2601d752f 100644
--- a/tests/files/gce_coreos-cilium.yml
+++ b/tests/files/gce_coreos-cilium.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: coreos-stable
 cloud_region: us-central1-c
@@ -6,7 +7,7 @@ startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
 
 # Deployment settings
 kube_network_plugin: cilium
-resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
+resolvconf_mode: host_resolvconf  # this is required as long as the coreos stable channel uses docker < 1.12
 deploy_netchecker: true
 enable_network_policy: true
 dns_min_replicas: 1
diff --git a/tests/files/gce_coreos-kube-router.yml b/tests/files/gce_coreos-kube-router.yml
index cc3d41b8ace9bdecbc01004621b968d04436bc1d..6c37559915675fc498866d3ed24b5365719f6f52 100644
--- a/tests/files/gce_coreos-kube-router.yml
+++ b/tests/files/gce_coreos-kube-router.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: coreos-stable
 cloud_region: us-central1-c
@@ -8,7 +9,7 @@ startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
 # Deployment settings
 kube_network_plugin: kube-router
 bootstrap_os: coreos
-resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
+resolvconf_mode: host_resolvconf  # this is required as long as the coreos stable channel uses docker < 1.12
 deploy_netchecker: true
 dns_min_replicas: 1
 cloud_provider: gce
diff --git a/tests/files/gce_debian9-calico-upgrade.yml b/tests/files/gce_debian9-calico-upgrade.yml
index b129904ff0dadb60c2c632975438f9f6b581194a..d634a6f33c045b15a2325b44470a3422d1daf943 100644
--- a/tests/files/gce_debian9-calico-upgrade.yml
+++ b/tests/files/gce_debian9-calico-upgrade.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image: debian-9-kubespray
 cloud_region: us-central1-b
diff --git a/tests/files/gce_opensuse-canal.yml b/tests/files/gce_opensuse-canal.yml
index 26b6415f5916dcb8a8c4f58b2fa121461d4eed02..4c83ba57902620c9728fc8a8dffc25bad7eabf4f 100644
--- a/tests/files/gce_opensuse-canal.yml
+++ b/tests/files/gce_opensuse-canal.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: opensuse-leap
 cloud_region: us-central1-c
diff --git a/tests/files/gce_rhel7-canal-sep.yml b/tests/files/gce_rhel7-canal-sep.yml
index 57e89189631d7622389743d642edefeb02a2b28b..42e07b1e6d6ada4ae938e8fd8e76c86be1dc8f58 100644
--- a/tests/files/gce_rhel7-canal-sep.yml
+++ b/tests/files/gce_rhel7-canal-sep.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: rhel-7
 cloud_region: us-central1-a
diff --git a/tests/files/gce_rhel7-cilium.yml b/tests/files/gce_rhel7-cilium.yml
index 96be18debbaa755caac1a93e21d33bcc3d9aa234..7a00b1b7f7d7cd404b9c601ad95f824c3af1bbd2 100644
--- a/tests/files/gce_rhel7-cilium.yml
+++ b/tests/files/gce_rhel7-cilium.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: rhel-7
 cloud_region: us-central1-b
diff --git a/tests/files/gce_rhel7-weave.yml b/tests/files/gce_rhel7-weave.yml
index ad5d61c4f9fb7e1f94dcf9c1d618a2091a2234bf..723b2289fa21436d05f3830700b403c453a20ea2 100644
--- a/tests/files/gce_rhel7-weave.yml
+++ b/tests/files/gce_rhel7-weave.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: rhel-7
 cloud_region: us-central1-b
diff --git a/tests/files/gce_ubuntu-canal-ha.yml b/tests/files/gce_ubuntu-canal-ha.yml
index 419c0426f6642f0ab2f7d5adbec211a78117b790..c1b7c6df87e93e8a99f015731f39799b942213bf 100644
--- a/tests/files/gce_ubuntu-canal-ha.yml
+++ b/tests/files/gce_ubuntu-canal-ha.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: ubuntu-1604-lts
 cloud_region: us-central1-b
diff --git a/tests/files/gce_ubuntu-canal-kubeadm.yml b/tests/files/gce_ubuntu-canal-kubeadm.yml
index 1591415584426da5163d0c844ed868e6715c2444..705df48706f9d9a53ec906949f6a7592cf7c4c64 100644
--- a/tests/files/gce_ubuntu-canal-kubeadm.yml
+++ b/tests/files/gce_ubuntu-canal-kubeadm.yml
@@ -1,5 +1,6 @@
+---
 # Instance settings
-cloud_image_family:  ubuntu-1604-lts
+cloud_image_family: ubuntu-1604-lts
 cloud_machine_type: "n1-standard-1"
 cloud_region: us-central1-c
 mode: ha
diff --git a/tests/files/gce_ubuntu-cilium-sep.yml b/tests/files/gce_ubuntu-cilium-sep.yml
index 9892cf19f3b592224d5a5e5fcab9647a996f1184..4965060dbe075934e2d029611d2e4d09ba038432 100644
--- a/tests/files/gce_ubuntu-cilium-sep.yml
+++ b/tests/files/gce_ubuntu-cilium-sep.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: ubuntu-1604-lts
 cloud_region: us-central1-b
@@ -9,4 +10,3 @@ deploy_netchecker: true
 enable_network_policy: true
 dns_min_replicas: 1
 cloud_provider: gce
-
diff --git a/tests/files/gce_ubuntu-contiv-sep.yml b/tests/files/gce_ubuntu-contiv-sep.yml
index 3d02319e895779f94c89f140d5f621af97b2bb87..342d0b8906f773898112038e46518cab4f32bf5f 100644
--- a/tests/files/gce_ubuntu-contiv-sep.yml
+++ b/tests/files/gce_ubuntu-contiv-sep.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: ubuntu-1604-lts
 cloud_region: us-central1-b
diff --git a/tests/files/gce_ubuntu-flannel-ha.yml b/tests/files/gce_ubuntu-flannel-ha.yml
index ea4c13098d02a9b8df4ee23bc8f2ee7fdae39294..54aeac1c82777188c3257b08f081f4daec9f8743 100644
--- a/tests/files/gce_ubuntu-flannel-ha.yml
+++ b/tests/files/gce_ubuntu-flannel-ha.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: ubuntu-1604-lts
 cloud_region: us-central1-b
diff --git a/tests/files/gce_ubuntu-kube-router-sep.yml b/tests/files/gce_ubuntu-kube-router-sep.yml
index bf10e7730cdc574d94ac41f34ad01dbaa1ffb97b..6dd926078d93f6ccff98ef6cc0ad962788529b6c 100644
--- a/tests/files/gce_ubuntu-kube-router-sep.yml
+++ b/tests/files/gce_ubuntu-kube-router-sep.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: ubuntu-1604-lts
 cloud_region: us-central1-c
diff --git a/tests/files/gce_ubuntu-rkt-sep.yml b/tests/files/gce_ubuntu-rkt-sep.yml
index c46807a47b3a0c8159f298ce300825ab0bd6a4a9..b2646a88d9261bd62ae686fdbdbca7587a8e2080 100644
--- a/tests/files/gce_ubuntu-rkt-sep.yml
+++ b/tests/files/gce_ubuntu-rkt-sep.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: ubuntu-1604-lts
 cloud_region: us-central1-c
diff --git a/tests/files/gce_ubuntu-weave-sep.yml b/tests/files/gce_ubuntu-weave-sep.yml
index 52d6c241646d9ceb72f1ab0e44cbaa34ec52fb46..dc97de7892fe71516f596d581d44cc7808ad1780 100644
--- a/tests/files/gce_ubuntu-weave-sep.yml
+++ b/tests/files/gce_ubuntu-weave-sep.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: ubuntu-1604-lts
 cloud_region: us-central1-c
diff --git a/tests/files/gce_ubuntu18-flannel-aio.yml b/tests/files/gce_ubuntu18-flannel-aio.yml
index c6638b6d617c8395b48e71e1da96da89bf701fd5..be8f514dd6aeb4894a36104d9faae0322ed6e4c4 100644
--- a/tests/files/gce_ubuntu18-flannel-aio.yml
+++ b/tests/files/gce_ubuntu18-flannel-aio.yml
@@ -1,3 +1,4 @@
+---
 # Instance settings
 cloud_image_family: ubuntu-1804-lts
 cloud_region: us-central1-a
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index 531b84c06434786a07e73346f71e9a05d06fe023..c4e8c105b3873f72dfc9cef13b35d7f5e99f2d74 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -17,9 +17,9 @@
     shell: "{{bin_dir}}/kubectl get pods -n test"
     register: pods
     until:
-      - '"ContainerCreating" not in pods.stdout'
-      - '"Pending" not in pods.stdout'
-      - '"Terminating" not in pods.stdout'
+    - '"ContainerCreating" not in pods.stdout'
+    - '"Pending" not in pods.stdout'
+    - '"Terminating" not in pods.stdout'
     retries: 60
     delay: 2
     no_log: true
@@ -69,12 +69,12 @@
     shell: "{{bin_dir}}/kubectl exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
     when: not item[0] in pods_hostnet and not item[1] in pods_hostnet
     with_nested:
-      - "{{pod_names}}"
-      - "{{pod_ips}}"
+    - "{{pod_names}}"
+    - "{{pod_ips}}"
 
   - name: Ping between hostnet pods is working
     shell: "{{bin_dir}}/kubectl exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
     when: item[0] in pods_hostnet and item[1] in pods_hostnet
     with_nested:
-      - "{{pod_names}}"
-      - "{{pod_ips}}"
+    - "{{pod_names}}"
+    - "{{pod_ips}}"
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index 3a4acfd55c507533431095fa82292dc3aed3e456..dd34846e860e77b045a920c3065fb0564e63f772 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -65,8 +65,8 @@
     - { role: kubespray-defaults}
     - { role: etcd, tags: etcd, etcd_cluster_setup: false }
 
-#Handle upgrades to master components first to maintain backwards compat.
-- hosts: kube-master
+- name: Handle upgrades to master components first to maintain backwards compat.
+  hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: 1
   roles:
@@ -79,8 +79,8 @@
     - { role: upgrade/post-upgrade, tags: post-upgrade }
   environment: "{{proxy_env}}"
 
-#Upgrade calico on all masters and nodes
-- hosts: kube-master:kube-node
+- name: Upgrade calico on all masters and nodes
+  hosts: kube-master:kube-node
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles:
@@ -89,8 +89,8 @@
     - { role: kubernetes-apps/network_plugin, tags: network }
     - { role: kubernetes-apps/policy_controller, tags: policy-controller }
 
-#Finally handle worker upgrades, based on given batch size
-- hosts: kube-node:!kube-master
+- name: Finally handle worker upgrades, based on given batch size
+  hosts: kube-node:!kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles: