From e67f848abc35c5cf11861cf8be3b2c0c12ceace4 Mon Sep 17 00:00:00 2001
From: MarkusTeufelberger <markusteufelberger@gmail.com>
Date: Thu, 2 May 2019 23:24:21 +0200
Subject: [PATCH] ansible-lint: add spaces around variables [E206] (#4699)

---
 .ansible-lint                                 |  1 -
 cluster.yml                                   |  8 ++--
 .../roles/generate-inventory/tasks/main.yml   |  4 +-
 .../roles/generate-inventory_2/tasks/main.yml |  4 +-
 .../roles/generate-templates/tasks/main.yml   | 11 ++++--
 .../dind/roles/dind-cluster/tasks/main.yaml   |  4 +-
 contrib/dind/roles/dind-host/tasks/main.yaml  |  2 +-
 .../metallb/roles/provision/tasks/main.yml    |  2 +-
 .../kubernetes-pv/ansible/tasks/main.yaml     | 12 +++---
 .../provision/tasks/bootstrap/deploy.yml      |  2 +-
 .../provision/tasks/bootstrap/storage.yml     |  2 +-
 .../roles/provision/tasks/glusterfs.yml       |  4 +-
 .../heketi/roles/provision/tasks/heketi.yml   |  2 +-
 .../heketi/roles/provision/tasks/main.yml     |  2 +-
 .../heketi/roles/provision/tasks/secret.yml   | 12 +++---
 .../heketi/roles/provision/tasks/storage.yml  |  2 +-
 .../roles/provision/tasks/storageclass.yml    |  2 +-
 .../roles/vault/tasks/shared/check_etcd.yml   |  2 +-
 mitogen.yaml                                  | 20 +++++-----
 roles/adduser/tasks/main.yml                  | 18 ++++-----
 roles/container-engine/docker/tasks/main.yml  | 26 ++++++-------
 .../docker/tasks/set_facts_dns.yml            |  4 +-
 roles/download/defaults/main.yml              |  2 +-
 roles/download/tasks/download_file.yml        | 18 ++++-----
 roles/download/tasks/download_prep.yml        |  6 +--
 .../download/tasks/set_docker_image_facts.yml |  4 +-
 roles/download/tasks/sync_container.yml       |  4 +-
 roles/download/tasks/sync_file.yml            |  2 +-
 roles/etcd/tasks/check_certs.yml              |  6 +--
 roles/etcd/tasks/gen_certs_script.yml         | 20 +++++-----
 roles/etcd/tasks/main.yml                     |  4 +-
 .../ansible/tasks/netchecker.yml              | 18 ++++-----
 .../cluster_roles/tasks/main.yml              | 18 ++++-----
 .../cluster_roles/tasks/oci.yml               |  2 +-
 .../nvidia_gpu/tasks/main.yml                 |  6 +--
 .../local_volume_provisioner/tasks/main.yml   |  2 +-
 .../helm/tasks/gen_helm_tiller_certs.yml      | 24 ++++++------
 roles/kubernetes-apps/helm/tasks/main.yml     | 18 ++++-----
 .../network_plugin/calico/tasks/main.yml      |  8 ++--
 .../network_plugin/canal/tasks/main.yml       |  8 ++--
 .../network_plugin/cilium/tasks/main.yml      | 10 ++---
 .../network_plugin/flannel/tasks/main.yml     |  8 ++--
 .../network_plugin/kube-router/tasks/main.yml |  2 +-
 .../network_plugin/multus/tasks/main.yml      | 10 ++---
 .../openstack/tasks/main.yml                  |  6 +--
 .../policy_controller/calico/tasks/main.yml   | 12 +++---
 roles/kubernetes/kubeadm/tasks/main.yml       |  4 +-
 .../master/tasks/encrypt-at-rest.yml          |  4 +-
 .../tasks/kubeadm-secondary-experimental.yml  | 10 ++---
 .../kubernetes/master/tasks/kubeadm-setup.yml | 10 ++---
 roles/kubernetes/master/tasks/pre-upgrade.yml |  4 +-
 .../node/tasks/azure-credential-check.yml     |  2 +-
 roles/kubernetes/node/tasks/main.yml          |  2 +-
 .../node/templates/kubelet.host.service.j2    |  2 +-
 .../preinstall/tasks/0020-verify-settings.yml |  2 +-
 .../preinstall/tasks/0040-set_facts.yml       | 10 ++---
 .../tasks/0050-create_directories.yml         |  2 +-
 .../preinstall/tasks/0060-resolvconf.yml      |  8 ++--
 .../tasks/0080-system-configurations.yml      |  6 +--
 .../preinstall/tasks/0090-etchosts.yml        |  2 +-
 .../preinstall/tasks/0100-dhclient-hooks.yml  |  2 +-
 .../tasks/0110-dhclient-hooks-undo.yml        |  2 +-
 .../kubernetes/tokens/tasks/check-tokens.yml  |  2 +-
 roles/kubernetes/tokens/tasks/gen_tokens.yml  | 10 ++---
 roles/kubespray-defaults/defaults/main.yaml   |  2 +-
 roles/network_plugin/calico/rr/tasks/main.yml |  4 +-
 roles/network_plugin/calico/tasks/install.yml | 18 ++++-----
 roles/network_plugin/calico/tasks/upgrade.yml |  2 +-
 roles/network_plugin/canal/tasks/main.yml     |  6 +--
 roles/network_plugin/cilium/tasks/main.yml    |  4 +-
 roles/network_plugin/flannel/tasks/main.yml   |  4 +-
 .../kube-router/tasks/annotate.yml            | 12 +++---
 .../etcd/tasks/prepare.yml                    |  2 +-
 roles/remove-node/post-remove/tasks/main.yml  |  2 +-
 roles/reset/tasks/main.yml                    |  4 +-
 .../win_nodes/kubernetes_patch/tasks/main.yml |  4 +-
 scale.yml                                     |  2 +-
 scripts/collect-info.yaml                     | 14 +++----
 .../roles/kubevirt-images/tasks/main.yml      |  2 +-
 tests/cloud_playbooks/create-aws.yml          |  6 +--
 tests/cloud_playbooks/create-do.yml           | 26 ++++++-------
 tests/cloud_playbooks/create-gce.yml          | 24 ++++++------
 tests/cloud_playbooks/delete-gce.yml          | 22 +++++------
 tests/cloud_playbooks/upload-logs-gcs.yml     | 22 +++++------
 tests/testcases/015_check-pods-running.yml    | 10 ++---
 tests/testcases/030_check-network.yml         | 38 +++++++++----------
 tests/testcases/040_check-network-adv.yml     | 26 ++++++-------
 upgrade-cluster.yml                           | 12 +++---
 88 files changed, 363 insertions(+), 353 deletions(-)

diff --git a/.ansible-lint b/.ansible-lint
index 2bd18414c..c44f782b6 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -5,7 +5,6 @@ skip_list:
   # The following rules throw errors.
   # These either still need to be corrected in the repository and the rules re-enabled or they are skipped on purpose.
   - '204'
-  - '206'
   - '301'
   - '305'
   - '306'
diff --git a/cluster.yml b/cluster.yml
index cc48fe459..1ee5fc2b7 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -34,7 +34,7 @@
   pre_tasks:
     - name: gather facts from all instances
       setup:
-      delegate_to: "{{item}}"
+      delegate_to: "{{ item }}"
       delegate_facts: true
       with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
       run_once: true
@@ -46,7 +46,7 @@
     - { role: kubernetes/preinstall, tags: preinstall }
     - { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
     - { role: download, tags: download, when: "not skip_downloads" }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - hosts: etcd
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -65,7 +65,7 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes/node, tags: node }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -109,7 +109,7 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes-apps, tags: apps }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - hosts: k8s-cluster
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
diff --git a/contrib/azurerm/roles/generate-inventory/tasks/main.yml b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
index b1e5c0ccc..409555fd0 100644
--- a/contrib/azurerm/roles/generate-inventory/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
@@ -8,4 +8,6 @@
     vm_list: "{{ vm_list_cmd.stdout }}"
 
 - name: Generate inventory
-  template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
+  template:
+    src: inventory.j2
+    dest: "{{ playbook_dir }}/inventory"
diff --git a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
index e7802b3a1..1772b1c29 100644
--- a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
@@ -13,4 +13,6 @@
     vm_roles_list: "{{ vm_list_cmd.stdout }}"
 
 - name: Generate inventory
-  template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
+  template:
+    src: inventory.j2
+    dest: "{{ playbook_dir }}/inventory"
diff --git a/contrib/azurerm/roles/generate-templates/tasks/main.yml b/contrib/azurerm/roles/generate-templates/tasks/main.yml
index 4ee6d858c..92a0e87c9 100644
--- a/contrib/azurerm/roles/generate-templates/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-templates/tasks/main.yml
@@ -1,10 +1,15 @@
 ---
 - set_fact:
-    base_dir: "{{playbook_dir}}/.generated/"
+    base_dir: "{{ playbook_dir }}/.generated/"
 
-- file: path={{base_dir}} state=directory recurse=true
+- file:
+    path: "{{ base_dir }}"
+    state: directory
+    recurse: true
 
-- template: src={{item}} dest="{{base_dir}}/{{item}}"
+- template:
+    src: "{{ item }}"
+    dest: "{{ base_dir }}/{{ item }}"
   with_items:
     - network.json
     - storage.json
diff --git a/contrib/dind/roles/dind-cluster/tasks/main.yaml b/contrib/dind/roles/dind-cluster/tasks/main.yaml
index affc99ea1..5b7c77e49 100644
--- a/contrib/dind/roles/dind-cluster/tasks/main.yaml
+++ b/contrib/dind/roles/dind-cluster/tasks/main.yaml
@@ -12,7 +12,7 @@
 - name: Null-ify some linux tools to ease DIND
   file:
     src: "/bin/true"
-    dest: "{{item}}"
+    dest: "{{ item }}"
     state: link
     force: yes
   with_items:
@@ -52,7 +52,7 @@
     - rsyslog
     - "{{ distro_ssh_service }}"
 
-- name: Create distro user "{{distro_user}}"
+- name: Create distro user "{{ distro_user }}"
   user:
     name: "{{ distro_user }}"
     uid: 1000
diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml
index d125414c1..40ca53cd6 100644
--- a/contrib/dind/roles/dind-host/tasks/main.yaml
+++ b/contrib/dind/roles/dind-host/tasks/main.yaml
@@ -28,7 +28,7 @@
       - /lib/modules:/lib/modules
       - "{{ item }}:/dind/docker"
   register: containers
-  with_items: "{{groups.containers}}"
+  with_items: "{{ groups.containers }}"
   tags:
     - addresses
 
diff --git a/contrib/metallb/roles/provision/tasks/main.yml b/contrib/metallb/roles/provision/tasks/main.yml
index 6b9661de4..66fcc591c 100644
--- a/contrib/metallb/roles/provision/tasks/main.yml
+++ b/contrib/metallb/roles/provision/tasks/main.yml
@@ -9,7 +9,7 @@
 - name: "Kubernetes Apps | Install and configure MetalLB"
   kube:
     name: "MetalLB"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/{{ item.item }}"
     state: "{{ item.changed | ternary('latest','present') }}"
   become: true
diff --git a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
index 2e108701a..baf8356b6 100644
--- a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
+++ b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
@@ -1,6 +1,8 @@
 ---
 - name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}}
+  template:
+    src: "{{ item.file }}"
+    dest: "{{ kube_config_dir }}/{{ item.dest }}"
   with_items:
     - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
     - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
@@ -12,9 +14,9 @@
   kube:
     name: glusterfs
     namespace: default
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.dest}}"
-    state: "{{item.changed | ternary('latest','present') }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
+    state: "{{ item.changed | ternary('latest','present') }}"
   with_items: "{{ gluster_pv.results }}"
   when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
index ac5115a00..93b473295 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
@@ -6,7 +6,7 @@
 - name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-bootstrap.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
 - name: "Wait for heketi bootstrap to complete."
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml
index be3c42caf..63a475a85 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml
@@ -6,7 +6,7 @@
 - name: "Create heketi storage."
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json"
     state: "present"
   vars:
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
index 105d9e2ac..5f00e28aa 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
@@ -6,7 +6,7 @@
 - name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/glusterfs-daemonset.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
 - name: "Kubernetes Apps | Label GlusterFS nodes"
@@ -33,6 +33,6 @@
 - name: "Kubernetes Apps | Install and configure Heketi Service Account"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-service-account.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
index 2052abefc..d322f6ff8 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
@@ -6,7 +6,7 @@
 - name: "Kubernetes Apps | Install and configure Heketi"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-deployment.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
 - name: "Ensure heketi is up and running."
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/main.yml b/contrib/network-storage/heketi/roles/provision/tasks/main.yml
index 23a2b4f9c..1feb27d7b 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/main.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/main.yml
@@ -7,7 +7,7 @@
 
 - name: "Kubernetes Apps | Test Heketi"
   register: "heketi_service_state"
-  command: "{{bin_dir}}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
   changed_when: false
 
 - name: "Kubernetes Apps | Bootstrap Heketi"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
index 364bb29b2..96f243048 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
@@ -1,19 +1,19 @@
 ---
 - register: "clusterrolebinding_state"
-  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
   changed_when: false
 - name: "Kubernetes Apps | Deploy cluster role binding."
   when: "clusterrolebinding_state.stdout == \"\""
-  command: "{{bin_dir}}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
+  command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
 - register: "clusterrolebinding_state"
-  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
   changed_when: false
 - assert:
     that: "clusterrolebinding_state.stdout != \"\""
     msg: "Cluster role binding is not present."
 
 - register: "secret_state"
-  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
   changed_when: false
 - name: "Render Heketi secret configuration."
   become: true
@@ -22,9 +22,9 @@
     dest: "{{ kube_config_dir }}/heketi.json"
 - name: "Deploy Heketi config secret"
   when: "secret_state.stdout == \"\""
-  command: "{{bin_dir}}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
+  command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
 - register: "secret_state"
-  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
   changed_when: false
 - assert:
     that: "secret_state.stdout != \"\""
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storage.yml b/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
index f3861d9ec..210930804 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
@@ -7,6 +7,6 @@
 - name: "Kubernetes Apps | Install and configure Heketi Storage"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-storage.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
index f878876bc..5bf3e3c4d 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
@@ -20,6 +20,6 @@
 - name: "Kubernetes Apps | Install and configure Storace Class"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/storageclass.yml"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
diff --git a/contrib/vault/roles/vault/tasks/shared/check_etcd.yml b/contrib/vault/roles/vault/tasks/shared/check_etcd.yml
index 9ebed2bf1..f8599d536 100644
--- a/contrib/vault/roles/vault/tasks/shared/check_etcd.yml
+++ b/contrib/vault/roles/vault/tasks/shared/check_etcd.yml
@@ -11,7 +11,7 @@
   until: vault_etcd_health_check.status == 200 or vault_etcd_health_check.status == 401
   retries: 3
   delay: 2
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   run_once: true
   failed_when: false
   register: vault_etcd_health_check
diff --git a/mitogen.yaml b/mitogen.yaml
index fa9d4ec54..853c39f9c 100644
--- a/mitogen.yaml
+++ b/mitogen.yaml
@@ -3,29 +3,29 @@
   strategy: linear
   vars:
     mitogen_version: master
-    mitogen_url: https://github.com/dw/mitogen/archive/{{mitogen_version}}.zip
+    mitogen_url: https://github.com/dw/mitogen/archive/{{ mitogen_version }}.zip
   tasks:
     - name: Create mitogen plugin dir
       file:
-        path: "{{item}}"
+        path: "{{ item }}"
         state: directory
       become: false
       loop:
-        - "{{playbook_dir}}/plugins/mitogen"
-        - "{{playbook_dir}}/dist"
+        - "{{ playbook_dir }}/plugins/mitogen"
+        - "{{ playbook_dir }}/dist"
 
     - name: download mitogen release
       get_url:
-        url: "{{mitogen_url}}"
-        dest: "{{playbook_dir}}/dist/mitogen_{{mitogen_version}}.zip"
+        url: "{{ mitogen_url }}"
+        dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.zip"
         validate_certs: true
 
     - name: extract zip
       unarchive:
-        src: "{{playbook_dir}}/dist/mitogen_{{mitogen_version}}.zip"
-        dest: "{{playbook_dir}}/dist/"
+        src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.zip"
+        dest: "{{ playbook_dir }}/dist/"
 
     - name: copy plugin
       synchronize:
-        src: "{{playbook_dir}}/dist/mitogen-{{mitogen_version}}/"
-        dest: "{{playbook_dir}}/plugins/mitogen"
+        src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
+        dest: "{{ playbook_dir }}/plugins/mitogen"
diff --git a/roles/adduser/tasks/main.yml b/roles/adduser/tasks/main.yml
index 3854ec411..774eb412b 100644
--- a/roles/adduser/tasks/main.yml
+++ b/roles/adduser/tasks/main.yml
@@ -1,15 +1,15 @@
 ---
 - name: User | Create User Group
   group:
-    name: "{{user.group|default(user.name)}}"
-    system: "{{user.system|default(omit)}}"
+    name: "{{ user.group|default(user.name) }}"
+    system: "{{ user.system|default(omit) }}"
 
 - name: User | Create User
   user:
-    comment: "{{user.comment|default(omit)}}"
-    createhome: "{{user.createhome|default(omit)}}"
-    group: "{{user.group|default(user.name)}}"
-    home: "{{user.home|default(omit)}}"
-    shell: "{{user.shell|default(omit)}}"
-    name: "{{user.name}}"
-    system: "{{user.system|default(omit)}}"
+    comment: "{{ user.comment|default(omit) }}"
+    createhome: "{{ user.createhome|default(omit) }}"
+    group: "{{ user.group|default(user.name) }}"
+    home: "{{ user.home|default(omit) }}"
+    shell: "{{ user.shell|default(omit) }}"
+    name: "{{ user.name }}"
+    system: "{{ user.system|default(omit) }}"
diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml
index c9f677567..0b979cc7d 100644
--- a/roles/container-engine/docker/tasks/main.yml
+++ b/roles/container-engine/docker/tasks/main.yml
@@ -54,8 +54,8 @@
 - name: ensure docker-ce repository public key is installed
   action: "{{ docker_repo_key_info.pkg_key }}"
   args:
-    id: "{{item}}"
-    url: "{{docker_repo_key_info.url}}"
+    id: "{{ item }}"
+    url: "{{ docker_repo_key_info.url }}"
     state: present
   register: keyserver_task_result
   until: keyserver_task_result is succeeded
@@ -67,7 +67,7 @@
 - name: ensure docker-ce repository is enabled
   action: "{{ docker_repo_info.pkg_repo }}"
   args:
-    repo: "{{item}}"
+    repo: "{{ item }}"
     state: present
   with_items: "{{ docker_repo_info.repos }}"
   when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse", "ClearLinux"] or is_atomic) and (docker_repo_info.repos|length > 0)
@@ -75,8 +75,8 @@
 - name: ensure docker-engine repository public key is installed
   action: "{{ dockerproject_repo_key_info.pkg_key }}"
   args:
-    id: "{{item}}"
-    url: "{{dockerproject_repo_key_info.url}}"
+    id: "{{ item }}"
+    url: "{{ dockerproject_repo_key_info.url }}"
     state: present
   register: keyserver_task_result
   until: keyserver_task_result is succeeded
@@ -90,7 +90,7 @@
 - name: ensure docker-engine repository is enabled
   action: "{{ dockerproject_repo_info.pkg_repo }}"
   args:
-    repo: "{{item}}"
+    repo: "{{ item }}"
     state: present
   with_items: "{{ dockerproject_repo_info.repos }}"
   when:
@@ -123,7 +123,7 @@
     baseurl: "{{ extras_rh_repo_base_url }}"
     file: "extras"
     gpgcheck: yes
-    gpgkey: "{{extras_rh_repo_gpgkey}}"
+    gpgkey: "{{ extras_rh_repo_gpgkey }}"
     keepcache: "{{ docker_rpm_keepcache | default('1') }}"
     proxy: " {{ http_proxy | default('_none_') }}"
   when:
@@ -148,10 +148,10 @@
 - name: ensure docker packages are installed
   action: "{{ docker_package_info.pkg_mgr }}"
   args:
-    pkg: "{{item.name}}"
-    force: "{{item.force|default(omit)}}"
-    conf_file: "{{item.yum_conf|default(omit)}}"
-    state: "{{item.state | default('present')}}"
+    pkg: "{{ item.name }}"
+    force: "{{ item.force|default(omit) }}"
+    conf_file: "{{ item.yum_conf|default(omit) }}"
+    state: "{{ item.state | default('present') }}"
     update_cache: "{{ omit if ansible_distribution == 'Fedora' else True }}"
   register: docker_task_result
   until: docker_task_result is succeeded
@@ -166,7 +166,7 @@
   action: "{{ docker_package_info.pkg_mgr }}"
   args:
     name: "{{ item.name }}"
-    state: "{{item.state | default('present')}}"
+    state: "{{ item.state | default('present') }}"
   with_items: "{{ docker_package_info.pkgs }}"
   register: docker_task_result
   until: docker_task_result is succeeded
@@ -185,7 +185,7 @@
 
 - name: show available packages on ubuntu
   fail:
-    msg: "{{available_packages}}"
+    msg: "{{ available_packages }}"
   when:
     - docker_task_result is failed
     - ansible_distribution == 'Ubuntu'
diff --git a/roles/container-engine/docker/tasks/set_facts_dns.yml b/roles/container-engine/docker/tasks/set_facts_dns.yml
index 3e621f524..99b9f0e26 100644
--- a/roles/container-engine/docker/tasks/set_facts_dns.yml
+++ b/roles/container-engine/docker/tasks/set_facts_dns.yml
@@ -2,11 +2,11 @@
 
 - name: set dns server for docker
   set_fact:
-    docker_dns_servers: "{{dns_servers}}"
+    docker_dns_servers: "{{ dns_servers }}"
 
 - name: show docker_dns_servers
   debug:
-    msg: "{{docker_dns_servers}}"
+    msg: "{{ docker_dns_servers }}"
 
 - name: set base docker dns facts
   set_fact:
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index a661c0d00..b8e642bf5 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -29,7 +29,7 @@ download_always_pull: False
 download_validate_certs: True
 
 # Use the first kube-master if download_localhost is not set
-download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}"
+download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube-master'][0] }}{% endif %}"
 
 # Arch of Docker images and needed packages
 image_arch: "{{host_architecture | default('amd64')}}"
diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml
index 3993ef6b6..2bfb5f70f 100644
--- a/roles/download/tasks/download_file.yml
+++ b/roles/download/tasks/download_file.yml
@@ -7,7 +7,7 @@
 
 - name: file_download | Create dest directory
   file:
-    path: "{{download.dest|dirname}}"
+    path: "{{ download.dest | dirname }}"
     state: directory
     recurse: yes
   when:
@@ -20,9 +20,9 @@
 #   to one task in the future.
 - name: file_download | Download item (delegate)
   get_url:
-    url: "{{download.url}}"
-    dest: "{{download.dest}}"
-    sha256sum: "{{download.sha256 | default(omit)}}"
+    url: "{{ download.url }}"
+    dest: "{{ download.dest }}"
+    sha256sum: "{{ download.sha256|default(omit) }}"
     owner: "{{ download.owner|default(omit) }}"
     mode: "{{ download.mode|default(omit) }}"
     validate_certs: "{{ download_validate_certs }}"
@@ -43,9 +43,9 @@
 
 - name: file_download | Download item (all)
   get_url:
-    url: "{{download.url}}"
-    dest: "{{download.dest}}"
-    sha256sum: "{{download.sha256 | default(omit)}}"
+    url: "{{ download.url }}"
+    dest: "{{ download.dest }}"
+    sha256sum: "{{ download.sha256|default(omit) }}"
     owner: "{{ download.owner|default(omit) }}"
     mode: "{{ download.mode|default(omit) }}"
     validate_certs: "{{ download_validate_certs }}"
@@ -64,8 +64,8 @@
 
 - name: file_download | Extract archives
   unarchive:
-    src: "{{download.dest}}"
-    dest: "{{download.dest|dirname}}"
+    src: "{{ download.dest }}"
+    dest: "{{ download.dest | dirname }}"
     owner: "{{ download.owner|default(omit) }}"
     mode: "{{ download.mode|default(omit) }}"
     copy: no
diff --git a/roles/download/tasks/download_prep.yml b/roles/download/tasks/download_prep.yml
index 40ee8e981..6bb48fcbc 100644
--- a/roles/download/tasks/download_prep.yml
+++ b/roles/download/tasks/download_prep.yml
@@ -11,16 +11,16 @@
 
 - name: container_download | Create dest directory for saved/loaded container images
   file:
-    path: "{{local_release_dir}}/containers"
+    path: "{{ local_release_dir }}/containers"
     state: directory
     recurse: yes
     mode: 0755
-    owner: "{{ansible_ssh_user|default(ansible_user_id)}}"
+    owner: "{{ ansible_ssh_user|default(ansible_user_id) }}"
   when: download_container
 
 - name: container_download | create local directory for saved/loaded container images
   file:
-    path: "{{local_release_dir}}/containers"
+    path: "{{ local_release_dir }}/containers"
     state: directory
     recurse: yes
   delegate_to: localhost
diff --git a/roles/download/tasks/set_docker_image_facts.yml b/roles/download/tasks/set_docker_image_facts.yml
index 84ed88760..6fb00e5c0 100644
--- a/roles/download/tasks/set_docker_image_facts.yml
+++ b/roles/download/tasks/set_docker_image_facts.yml
@@ -5,7 +5,7 @@
 
 - set_fact:
     pull_args: >-
-      {%- if pull_by_digest %}{{download.repo}}@sha256:{{download.sha256}}{%- else -%}{{download.repo}}:{{download.tag}}{%- endif -%}
+      {%- if pull_by_digest %}{{ download.repo }}@sha256:{{ download.sha256 }}{%- else -%}{{ download.repo }}:{{ download.tag }}{%- endif -%}
 
 - name: Register docker images info
   shell: >-
@@ -33,7 +33,7 @@
 
 - name: Check the local digest sha256 corresponds to the given image tag
   assert:
-    that: "{{download.repo}}:{{download.tag}} in docker_images.stdout.split(',')"
+    that: "{{ download.repo }}:{{ download.tag }} in docker_images.stdout.split(',')"
   when:
     - not download_always_pull
     - not pull_required
diff --git a/roles/download/tasks/sync_container.yml b/roles/download/tasks/sync_container.yml
index 767469422..fd46766ee 100644
--- a/roles/download/tasks/sync_container.yml
+++ b/roles/download/tasks/sync_container.yml
@@ -8,7 +8,7 @@
     - facts
 
 - set_fact:
-    fname: "{{local_release_dir}}/containers/{{download.repo|regex_replace('/|\0|:', '_')}}:{{download.tag|default(download.sha256)|regex_replace('/|\0|:', '_')}}.tar"
+    fname: "{{ local_release_dir }}/containers/{{ download.repo|regex_replace('/|\0|:', '_') }}:{{ download.tag|default(download.sha256)|regex_replace('/|\0|:', '_') }}.tar"
   run_once: true
   when:
     - download.enabled
@@ -20,7 +20,7 @@
 
 - name: "container_download | Set default value for 'container_changed' to false"
   set_fact:
-    container_changed: "{{pull_required|default(false)}}"
+    container_changed: "{{ pull_required|default(false) }}"
   when:
     - download.enabled
     - download.container
diff --git a/roles/download/tasks/sync_file.yml b/roles/download/tasks/sync_file.yml
index 530a8237d..6813b0534 100644
--- a/roles/download/tasks/sync_file.yml
+++ b/roles/download/tasks/sync_file.yml
@@ -1,7 +1,7 @@
 ---
 - name: file_download | create local download destination directory
   file:
-    path: "{{download.dest|dirname}}"
+    path: "{{ download.dest|dirname }}"
     state: directory
     recurse: yes
     mode: 0755
diff --git a/roles/etcd/tasks/check_certs.yml b/roles/etcd/tasks/check_certs.yml
index b11a2e9e4..e0ee9f7e9 100644
--- a/roles/etcd/tasks/check_certs.yml
+++ b/roles/etcd/tasks/check_certs.yml
@@ -4,7 +4,7 @@
     paths: "{{ etcd_cert_dir }}"
     patterns: "ca.pem,node*.pem"
     get_checksum: true
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   register: etcdcert_master
   run_once: true
 
@@ -30,10 +30,10 @@
   with_items: "{{ expected_files }}"
   vars:
     expected_files: >-
-       ['{{etcd_cert_dir}}/ca.pem',
+       ['{{ etcd_cert_dir }}/ca.pem',
        {% set all_etcd_hosts = groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort %}
        {% for host in all_etcd_hosts %}
-       '{{etcd_cert_dir}}/node-{{ host }}-key.pem'
+       '{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
        {% if not loop.last %}{{','}}{% endif %}
        {% endfor %}]
 
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index 63208d54a..66b2030a5 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -8,7 +8,7 @@
     mode: 0700
     recurse: yes
 
-- name: "Gen_certs | create etcd script dir (on {{groups['etcd'][0]}})"
+- name: "Gen_certs | create etcd script dir (on {{ groups['etcd'][0] }})"
   file:
     path: "{{ etcd_script_dir }}"
     state: directory
@@ -16,9 +16,9 @@
     mode: 0700
   run_once: yes
   when: inventory_hostname == groups['etcd'][0]
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
 
-- name: "Gen_certs | create etcd cert dir (on {{groups['etcd'][0]}})"
+- name: "Gen_certs | create etcd cert dir (on {{ groups['etcd'][0] }})"
   file:
     path: "{{ etcd_cert_dir }}"
     group: "{{ etcd_cert_group }}"
@@ -28,14 +28,14 @@
     mode: 0700
   run_once: yes
   when: inventory_hostname == groups['etcd'][0]
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
 
 - name: Gen_certs | write openssl config
   template:
     src: "openssl.conf.j2"
     dest: "{{ etcd_config_dir }}/openssl.conf"
   run_once: yes
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - gen_certs|default(false)
     - inventory_hostname == groups['etcd'][0]
@@ -46,7 +46,7 @@
     dest: "{{ etcd_script_dir }}/make-ssl-etcd.sh"
     mode: 0700
   run_once: yes
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - gen_certs|default(false)
     - inventory_hostname == groups['etcd'][0]
@@ -65,7 +65,7 @@
                 {% endif %}
               {% endfor %}"
   run_once: yes
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - gen_certs|default(false)
   notify: set etcd_secret_changed
@@ -87,7 +87,7 @@
         '{{ etcd_cert_dir }}/node-{{ node }}.pem',
         '{{ etcd_cert_dir }}/node-{{ node }}-key.pem',
         {% endfor %}]"
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - inventory_hostname in groups['etcd']
     - sync_certs|default(false)
@@ -133,13 +133,13 @@
   no_log: true
   register: etcd_node_certs
   check_mode: no
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or
         inventory_hostname in groups['k8s-cluster']) and
         sync_certs|default(false) and inventory_hostname not in groups['etcd']
 
 - name: Gen_certs | Copy certs on nodes
-  shell: "base64 -d <<< '{{etcd_node_certs.stdout|quote}}' | tar xz -C {{ etcd_cert_dir }}"
+  shell: "base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}"
   args:
     executable: /bin/bash
   no_log: true
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 88f78be00..c729b880d 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -8,9 +8,9 @@
   set_fact:
     host_architecture: >-
       {%- if ansible_architecture in architecture_groups -%}
-      {{architecture_groups[ansible_architecture]}}
+      {{ architecture_groups[ansible_architecture] }}
       {%- else -%}
-       {{ansible_architecture}}
+       {{ ansible_architecture }}
       {% endif %}
 
 - include_tasks: check_certs.yml
diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
index cf115db77..d99700dbb 100644
--- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml
+++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
@@ -13,7 +13,7 @@
     name: "netchecker-server"
     namespace: "{{ netcheck_namespace }}"
     filename: "{{ netchecker_server_manifest.stat.path }}"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "deploy"
     state: latest
   when: inventory_hostname == groups['kube-master'][0] and netchecker_server_manifest.stat.exists
@@ -39,13 +39,13 @@
 
 - name: Kubernetes Apps | Append extra templates to Netchecker Templates list for PodSecurityPolicy
   set_fact:
-    netchecker_templates: "{{ netchecker_templates_for_psp + netchecker_templates}}"
+    netchecker_templates: "{{ netchecker_templates_for_psp + netchecker_templates }}"
   when: podsecuritypolicy_enabled
 
 - name: Kubernetes Apps | Lay Down Netchecker Template
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items: "{{ netchecker_templates }}"
   register: manifests
   when:
@@ -53,11 +53,11 @@
 
 - name: Kubernetes Apps | Start Netchecker Resources
   kube:
-    name: "{{item.item.name}}"
-    namespace: "{{netcheck_namespace}}"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    name: "{{ item.item.name }}"
+    namespace: "{{ netcheck_namespace }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0] and not item is skipped
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index f58dda1bb..675417492 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -41,10 +41,10 @@
 
 - name: Kubernetes Apps | Add policies, roles, bindings for PodSecurityPolicy
   kube:
-    name: "{{item.item.name}}"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    name: "{{ item.item.name }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   register: result
   until: result is succeeded
@@ -69,7 +69,7 @@
 - name: Apply workaround to allow all nodes with cert O=system:nodes to register
   kube:
     name: "kubespray:system:node"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "clusterrolebinding"
     filename: "{{ kube_config_dir }}/node-crb.yml"
     state: latest
@@ -96,7 +96,7 @@
 - name: Apply webhook ClusterRole
   kube:
     name: "system:node-webhook"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "clusterrole"
     filename: "{{ kube_config_dir }}/node-webhook-cr.yml"
     state: latest
@@ -121,7 +121,7 @@
 - name: Grant system:nodes the webhook ClusterRole
   kube:
     name: "system:node-webhook"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "clusterrolebinding"
     filename: "{{ kube_config_dir }}/node-webhook-crb.yml"
     state: latest
@@ -164,7 +164,7 @@
 - name: Apply vsphere-cloud-provider ClusterRole
   kube:
     name: "system:vsphere-cloud-provider"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "clusterrolebinding"
     filename: "{{ kube_config_dir }}/vsphere-rbac.yml"
     state: latest
@@ -194,7 +194,7 @@
 - name: PriorityClass | Create k8s-cluster-critical
   kube:
     name: k8s-cluster-critical
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "PriorityClass"
     filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
     state: latest
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/oci.yml b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
index 54ee49d78..22b39b3d4 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
@@ -10,7 +10,7 @@
 
 - name: Apply OCI RBAC
   kube:
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/oci-rbac.yml"
   when:
   - cloud_provider is defined
diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml
index 50822be7d..fd3ea42fa 100644
--- a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml
+++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml
@@ -13,12 +13,12 @@
 
 - name: Container Engine Acceleration Nvidia GPU | Set fact of download url Tesla
   set_fact:
-    nvidia_driver_download_url_default: "{{nvidia_gpu_tesla_base_url}}{{nvidia_url_end}}"
+    nvidia_driver_download_url_default: "{{ nvidia_gpu_tesla_base_url }}{{ nvidia_url_end }}"
   when: nvidia_gpu_flavor|lower == "tesla"
 
 - name: Container Engine Acceleration Nvidia GPU | Set fact of download url GTX
   set_fact:
-    nvidia_driver_download_url_default: "{{nvidia_gpu_gtx_base_url}}{{nvidia_url_end}}"
+    nvidia_driver_download_url_default: "{{ nvidia_gpu_gtx_base_url }}{{ nvidia_url_end }}"
   when: nvidia_gpu_flavor|lower == "gtx"
 
 - name: Container Engine Acceleration Nvidia GPU | Create addon dir
@@ -49,6 +49,6 @@
     filename: "{{ kube_config_dir }}/addons/container_engine_accelerator/{{ item.item.file }}"
     state: "latest"
   with_items:
-    - "{{container_engine_accelerator_manifests.results}}"
+    - "{{ container_engine_accelerator_manifests.results }}"
   when:
     - inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container and nvidia_driver_install_supported
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
index 6b970317e..2359588b5 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
@@ -9,7 +9,7 @@
   delegate_to: "{{ item[0] }}"
   with_nested:
     - "{{ groups['k8s-cluster'] }}"
-    - "{{ local_volume_provisioner_storage_classes.keys() | list}}"
+    - "{{ local_volume_provisioner_storage_classes.keys() | list }}"
 
 - name: Local Volume Provisioner | Create addon dir
   file:
diff --git a/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
index 69d0cd2f9..053fbc0db 100644
--- a/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
+++ b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
@@ -1,15 +1,15 @@
 ---
-- name: "Gen_helm_tiller_certs | Create helm config directory (on {{groups['kube-master'][0]}})"
+- name: "Gen_helm_tiller_certs | Create helm config directory (on {{ groups['kube-master'][0] }})"
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   file:
     path: "{{ helm_config_dir }}"
     state: directory
     owner: kube
 
-- name: "Gen_helm_tiller_certs | Create helm script directory (on {{groups['kube-master'][0]}})"
+- name: "Gen_helm_tiller_certs | Create helm script directory (on {{ groups['kube-master'][0] }})"
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   file:
     path: "{{ helm_script_dir }}"
     state: directory
@@ -17,24 +17,24 @@
 
 - name: Gen_helm_tiller_certs | Copy certs generation script
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   template:
     src: "helm-make-ssl.sh.j2"
     dest: "{{ helm_script_dir }}/helm-make-ssl.sh"
     mode: 0700
 
-- name: "Check_helm_certs | check if helm client certs have already been generated on first master (on {{groups['kube-master'][0]}})"
+- name: "Check_helm_certs | check if helm client certs have already been generated on first master (on {{ groups['kube-master'][0] }})"
   find:
     paths: "{{ helm_home_dir }}"
     patterns: "*.pem"
     get_checksum: true
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   register: helmcert_master
   run_once: true
 
 - name: Gen_helm_tiller_certs | run cert generation script
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   command: "{{ helm_script_dir }}/helm-make-ssl.sh -e {{ helm_home_dir }} -d {{ helm_tiller_cert_dir }}"
 
 - set_fact:
@@ -64,7 +64,7 @@
   no_log: true
   register: helm_client_cert_data
   check_mode: no
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
 
 - name: Gen_helm_tiller_certs | Use tempfile for unpacking certs on masters
@@ -78,8 +78,8 @@
 
 - name: Gen_helm_tiller_certs | Write helm client certs to tempfile
   copy:
-    content: "{{helm_client_cert_data.stdout}}"
-    dest: "{{helm_cert_tempfile.path}}"
+    content: "{{ helm_client_cert_data.stdout }}"
+    dest: "{{ helm_cert_tempfile.path }}"
     owner: root
     mode: "0600"
   when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
@@ -93,7 +93,7 @@
 
 - name: Gen_helm_tiller_certs | Cleanup tempfile on masters
   file:
-    path: "{{helm_cert_tempfile.path}}"
+    path: "{{ helm_cert_tempfile.path }}"
     state: absent
   when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
 
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index 2a9843de7..900261fd2 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -7,8 +7,8 @@
 
 - name: Helm | Lay Down Helm Manifests (RBAC)
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: tiller, file: tiller-namespace.yml, type: namespace}
     - {name: tiller, file: tiller-sa.yml, type: sa}
@@ -20,11 +20,11 @@
 
 - name: Helm | Apply Helm Manifests (RBAC)
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "{{ tiller_namespace }}"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ manifests.results }}"
   when:
@@ -56,7 +56,7 @@
     {% endif %}
   register: install_helm
   changed_when: false
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 # FIXME: https://github.com/helm/helm/issues/4063
 - name: Helm | Force apply tiller overrides if necessary
@@ -73,12 +73,12 @@
     {% if tiller_secure_release_info %} --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}' {% endif %}
     {% if tiller_wait %} --wait{% endif %}
     --output yaml
-    | {{bin_dir}}/kubectl apply -f -
+    | {{ bin_dir }}/kubectl apply -f -
   changed_when: false
   when:
     - (tiller_override is defined and tiller_override) or (kube_version is version('v1.11.1', '>='))
     - inventory_hostname == groups['kube-master'][0]
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - name: Make sure bash_completion.d folder exists
   file:
diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
index d82da98ae..65fb9d515 100644
--- a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
@@ -1,11 +1,11 @@
 ---
 - name: Start Calico resources
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items:
     - "{{ calico_node_manifests.results }}"
diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
index d5776def1..b495106b1 100644
--- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
@@ -1,11 +1,11 @@
 ---
 - name: Canal | Start Resources
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ canal_manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0] and not item is skipped
diff --git a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
index 363f795a4..1baaa1ce6 100755
--- a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
@@ -1,17 +1,17 @@
 ---
 - name: Cilium | Start Resources
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ cilium_node_manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0] and not item is skipped
 
 - name: Cilium | Wait for pods to run
-  command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601
+  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601
   register: pods_not_ready
   until: pods_not_ready.stdout.find("cilium")==-1
   retries: 30
diff --git a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
index 2be0739f8..3ed49db81 100644
--- a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
@@ -1,11 +1,11 @@
 ---
 - name: Flannel | Start Resources
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ flannel_node_manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0] and not item is skipped
diff --git a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
index f5ff16308..3b76c4336 100644
--- a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
@@ -12,7 +12,7 @@
     - inventory_hostname == groups['kube-master'][0]
 
 - name: kube-router | Wait for kube-router pods to be ready
-  command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"   # noqa 601
+  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"   # noqa 601
   register: pods_not_ready
   until: pods_not_ready.stdout.find("kube-router")==-1
   retries: 30
diff --git a/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml b/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml
index 9d7669cc7..48d00538c 100644
--- a/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml
@@ -1,11 +1,11 @@
 ---
 - name: Multus | Start resources
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
-  with_items: "{{ multus_manifest_1.results }} + {{multus_manifest_2.results }}"
+  with_items: "{{ multus_manifest_1.results }} + {{ multus_manifest_2.results }}"
   when: inventory_hostname == groups['kube-master'][0] and not item|skipped
diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
index 80d5fdd29..629c6add7 100644
--- a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
+++ b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Kubernetes Persistent Volumes | Lay down OpenStack Cinder Storage Class template
   template:
     src: "openstack-storage-class.yml.j2"
-    dest: "{{kube_config_dir}}/openstack-storage-class.yml"
+    dest: "{{ kube_config_dir }}/openstack-storage-class.yml"
   register: manifests
   when:
     - inventory_hostname == groups['kube-master'][0]
@@ -10,9 +10,9 @@
 - name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class
   kube:
     name: storage-class
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: StorageClass
-    filename: "{{kube_config_dir}}/openstack-storage-class.yml"
+    filename: "{{ kube_config_dir }}/openstack-storage-class.yml"
     state: "latest"
   when:
     - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
index 1f262affa..bbd39d63f 100644
--- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
+++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
@@ -10,8 +10,8 @@
 
 - name: Create calico-kube-controllers manifests
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: calico-kube-controllers, file: calico-kube-controllers.yml, type: deployment}
     - {name: calico-kube-controllers, file: calico-kube-sa.yml, type: sa}
@@ -24,11 +24,11 @@
 
 - name: Start of Calico kube controllers
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items:
     - "{{ calico_kube_manifests.results }}"
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index a79fdee12..373362427 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -77,7 +77,7 @@
     - name: Join to cluster
       command: >-
         {{ bin_dir }}/kubeadm join
-        --config {{ kube_config_dir}}/kubeadm-client.conf
+        --config {{ kube_config_dir }}/kubeadm-client.conf
         --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests
       register: kubeadm_join
       async: 120
@@ -88,7 +88,7 @@
     - name: Join to cluster with ignores
       command: >-
         {{ bin_dir }}/kubeadm join
-        --config {{ kube_config_dir}}/kubeadm-client.conf
+        --config {{ kube_config_dir }}/kubeadm-client.conf
         --ignore-preflight-errors=all
       register: kubeadm_join
       async: 60
diff --git a/roles/kubernetes/master/tasks/encrypt-at-rest.yml b/roles/kubernetes/master/tasks/encrypt-at-rest.yml
index 192790039..09584dce8 100644
--- a/roles/kubernetes/master/tasks/encrypt-at-rest.yml
+++ b/roles/kubernetes/master/tasks/encrypt-at-rest.yml
@@ -12,12 +12,12 @@
 
 - name: Base 64 Decode slurped secrets_encryption.yaml file
   set_fact:
-    secret_file_decoded: "{{secret_file_encoded['content'] | b64decode | from_yaml}}"
+    secret_file_decoded: "{{ secret_file_encoded['content'] | b64decode | from_yaml }}"
   when: secrets_encryption_file.stat.exists
 
 - name: Extract secret value from secrets_encryption.yaml
   set_fact:
-    kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode}}"
+    kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode }}"
   when: secrets_encryption_file.stat.exists
 
 - name: Set kube_encrypt_token across master nodes
diff --git a/roles/kubernetes/master/tasks/kubeadm-secondary-experimental.yml b/roles/kubernetes/master/tasks/kubeadm-secondary-experimental.yml
index c3afb5c0c..e1dfef01c 100644
--- a/roles/kubernetes/master/tasks/kubeadm-secondary-experimental.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-secondary-experimental.yml
@@ -5,7 +5,7 @@
       {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
       {{ first_kube_master }}:{{ kube_apiserver_port }}
       {%- else -%}
-      {{ kube_apiserver_endpoint | regex_replace('https://', '')}}
+      {{ kube_apiserver_endpoint | regex_replace('https://', '') }}
       {%- endif %}
   tags:
     - facts
@@ -21,15 +21,15 @@
 
 - name: Wait for k8s apiserver
   wait_for:
-    host: "{{kubeadm_discovery_address.split(':')[0]}}"
-    port: "{{kubeadm_discovery_address.split(':')[1]}}"
+    host: "{{ kubeadm_discovery_address.split(':')[0] }}"
+    port: "{{ kubeadm_discovery_address.split(':')[1] }}"
     timeout: 180
 
 
 - name: Upload certificates so they are fresh and not expired
   command: >-
     {{ bin_dir }}/kubeadm init phase
-    --config {{ kube_config_dir}}/kubeadm-config.yaml
+    --config {{ kube_config_dir }}/kubeadm-config.yaml
     upload-certs --experimental-upload-certs
     {% if kubeadm_certificate_key is defined %}
     --certificate-key={{ kubeadm_certificate_key }}
@@ -46,7 +46,7 @@
 - name: Joining control plane node to the cluster.
   command: >-
     {{ bin_dir }}/kubeadm join
-    --config {{ kube_config_dir}}/kubeadm-controlplane.yaml
+    --config {{ kube_config_dir }}/kubeadm-controlplane.yaml
     --ignore-preflight-errors=all
     {% if kubeadm_certificate_key is defined %}
     --certificate-key={{ kubeadm_certificate_key }}
diff --git a/roles/kubernetes/master/tasks/kubeadm-setup.yml b/roles/kubernetes/master/tasks/kubeadm-setup.yml
index 24c91d1be..a00702c95 100644
--- a/roles/kubernetes/master/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-setup.yml
@@ -3,7 +3,7 @@
   stat:
     path: "{{ kube_cert_dir }}/apiserver.pem"
   register: old_apiserver_cert
-  delegate_to: "{{groups['kube-master']|first}}"
+  delegate_to: "{{ groups['kube-master'] | first }}"
   run_once: true
 
 - name: kubeadm | Migrate old certs if necessary
@@ -41,14 +41,14 @@
 
 - name: kubeadm | Delete old static pods
   file:
-    path: "{{ kube_config_dir }}/manifests/{{item}}.manifest"
+    path: "{{ kube_config_dir }}/manifests/{{ item }}.manifest"
     state: absent
   with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler", "kube-proxy"]
   when:
     - old_apiserver_cert.stat.exists
 
 - name: kubeadm | Forcefully delete old static pods
-  shell: "docker ps -f name=k8s_{{item}} -q | xargs --no-run-if-empty docker rm -f"
+  shell: "docker ps -f name=k8s_{{ item }} -q | xargs --no-run-if-empty docker rm -f"
   with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   when:
     - old_apiserver_cert.stat.exists
@@ -147,7 +147,7 @@
   retries: 5
   delay: 5
   until: temp_token is succeeded
-  delegate_to: "{{groups['kube-master']|first}}"
+  delegate_to: "{{ groups['kube-master'] | first }}"
   when: kubeadm_token is not defined
   tags:
     - kubeadm_token
@@ -190,6 +190,6 @@
 # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
 - name: kubeadm | Remove taint for master with node role
   command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} node-role.kubernetes.io/master:NoSchedule-"
-  delegate_to: "{{groups['kube-master']|first}}"
+  delegate_to: "{{ groups['kube-master'] | first }}"
   when: inventory_hostname in groups['kube-node']
   failed_when: false
diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml
index 3fd9855ea..d6ce320ba 100644
--- a/roles/kubernetes/master/tasks/pre-upgrade.yml
+++ b/roles/kubernetes/master/tasks/pre-upgrade.yml
@@ -1,7 +1,7 @@
 ---
 - name: "Pre-upgrade | Delete master manifests if etcd secrets changed"
   file:
-    path: "/etc/kubernetes/manifests/{{item}}.manifest"
+    path: "/etc/kubernetes/manifests/{{ item }}.manifest"
     state: absent
   with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
@@ -9,7 +9,7 @@
   when: etcd_secret_changed|default(false)
 
 - name: "Pre-upgrade | Delete master containers forcefully"
-  shell: "docker ps -af name=k8s_{{item}}* -q | xargs --no-run-if-empty docker rm -f"
+  shell: "docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
   with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   when: kube_apiserver_manifest_replaced.changed
diff --git a/roles/kubernetes/node/tasks/azure-credential-check.yml b/roles/kubernetes/node/tasks/azure-credential-check.yml
index 840b5bbfc..529bc3f8f 100644
--- a/roles/kubernetes/node/tasks/azure-credential-check.yml
+++ b/roles/kubernetes/node/tasks/azure-credential-check.yml
@@ -56,7 +56,7 @@
 
 - name: check azure_loadbalancer_sku value
   fail:
-    msg: "azure_loadbalancer_sku has an invalid value '{{azure_loadbalancer_sku}}'. Supported values are 'basic', 'standard'"
+    msg: "azure_loadbalancer_sku has an invalid value '{{ azure_loadbalancer_sku }}'. Supported values are 'basic', 'standard'"
   when: azure_loadbalancer_sku not in ["basic", "standard"]
 
 - name: "check azure_exclude_master_from_standard_lb is a bool"
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 9c249fc77..0a593b3a0 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -65,7 +65,7 @@
 - name: Verify if br_netfilter module exists
   shell: "modinfo br_netfilter"
   environment:
-    PATH: "{{ ansible_env.PATH}}:/sbin"  # Make sure we can workaround RH's conservative path management
+    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH's conservative path management
   register: modinfo_br_netfilter
   failed_when: modinfo_br_netfilter.rc not in [0, 1]
   changed_when: false
diff --git a/roles/kubernetes/node/templates/kubelet.host.service.j2 b/roles/kubernetes/node/templates/kubelet.host.service.j2
index 3584cfcf5..96de60d6e 100644
--- a/roles/kubernetes/node/templates/kubelet.host.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.host.service.j2
@@ -6,7 +6,7 @@ Wants=docker.socket
 
 [Service]
 User=root
-EnvironmentFile=-{{kube_config_dir}}/kubelet.env
+EnvironmentFile=-{{ kube_config_dir }}/kubelet.env
 ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
 ExecStart={{ bin_dir }}/kubelet \
 		$KUBE_LOGTOSTDERR \
diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
index 79df9b396..e7184d4a5 100644
--- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
@@ -35,7 +35,7 @@
 - name: "Stop if known booleans are set as strings (Use JSON format on CLI: -e \"{'key': true }\")"
   assert:
     that: item.value|type_debug == 'bool'
-    msg: "{{item.value}} isn't a bool"
+    msg: "{{ item.value }} isn't a bool"
   run_once: yes
   with_items:
     - { name: download_run_once, value: "{{ download_run_once }}" }
diff --git a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
index 607197475..c06207fd0 100644
--- a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
@@ -8,9 +8,9 @@
   set_fact:
     host_architecture: >-
       {%- if ansible_architecture in architecture_groups -%}
-      {{architecture_groups[ansible_architecture]}}
+      {{ architecture_groups[ansible_architecture] }}
       {%- else -%}
-       {{ansible_architecture}}
+       {{ ansible_architecture }}
       {% endif %}
 
 - name: Force binaries directory for Container Linux by CoreOS
@@ -46,7 +46,7 @@
 - set_fact:
     bogus_domains: |-
       {% for d in [ 'default.svc.' + dns_domain, 'svc.' + dns_domain ] + searchdomains|default([]) -%}
-      {{dns_domain}}.{{d}}./{{d}}.{{d}}./com.{{d}}./
+      {{ dns_domain }}.{{ d }}./{{ d }}.{{ d }}./com.{{ d }}./
       {%- endfor %}
     cloud_resolver: >-
       {%- if cloud_provider is defined and cloud_provider == 'gce' -%}
@@ -139,9 +139,9 @@
 - name: generate nameservers to resolvconf
   set_fact:
     nameserverentries:
-      nameserver {{( coredns_server + nameservers|d([]) + cloud_resolver|d([])) | join(',nameserver ')}}
+      nameserver {{ ( coredns_server + nameservers|d([]) + cloud_resolver|d([])) | join(',nameserver ') }}
     supersede_nameserver:
-      supersede domain-name-servers {{( coredns_server + nameservers|d([]) + cloud_resolver|d([])) | join(', ') }};
+      supersede domain-name-servers {{ ( coredns_server + nameservers|d([]) + cloud_resolver|d([])) | join(', ') }};
 
 - name: gather os specific variables
   include_vars: "{{ item }}"
diff --git a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
index 93b95a32b..1e28d1785 100644
--- a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
+++ b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
@@ -17,7 +17,7 @@
     - master
     - node
   with_items:
-    - "{{bin_dir}}"
+    - "{{ bin_dir }}"
     - "{{ kube_config_dir }}"
     - "{{ kube_cert_dir }}"
     - "{{ kube_manifest_dir }}"
diff --git a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
index 099077753..a57e567fe 100644
--- a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
+++ b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
@@ -5,7 +5,7 @@
 
 - name: Add domain/search/nameservers/options to resolv.conf
   blockinfile:
-    path: "{{resolvconffile}}"
+    path: "{{ resolvconffile }}"
     block: |-
       {% for item in [domainentry] + [searchentries] + nameserverentries.split(',') -%}
       {{ item }}
@@ -22,7 +22,7 @@
 
 - name: Remove search/domain/nameserver options before block
   replace:
-    dest: "{{item[0]}}"
+    dest: "{{ item[0] }}"
     regexp: '^{{ item[1] }}[^#]*(?=# Ansible entries BEGIN)'
     backup: yes
     follow: yes
@@ -33,7 +33,7 @@
 
 - name: Remove search/domain/nameserver options after block
   replace:
-    dest: "{{item[0]}}"
+    dest: "{{ item[0] }}"
     regexp: '(# Ansible entries END\n(?:(?!^{{ item[1] }}).*\n)*)(?:^{{ item[1] }}.*\n?)+'
     replace: '\1'
     backup: yes
@@ -51,7 +51,7 @@
 
 - name: persist resolvconf cloud init file
   template:
-    dest: "{{resolveconf_cloud_init_conf}}"
+    dest: "{{ resolveconf_cloud_init_conf }}"
     src: resolvconf.j2
     owner: root
     mode: 0644
diff --git a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
index eb87b14f4..5e2c87b55 100644
--- a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
+++ b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
@@ -31,14 +31,14 @@
 
 - name: Stat sysctl file configuration
   stat:
-    path: "{{sysctl_file_path}}"
+    path: "{{ sysctl_file_path }}"
   register: sysctl_file_stat
   tags:
     - bootstrap-os
 
 - name: Change sysctl file path to link source if linked
   set_fact:
-    sysctl_file_path: "{{sysctl_file_stat.stat.lnk_source}}"
+    sysctl_file_path: "{{ sysctl_file_stat.stat.lnk_source }}"
   when:
     - sysctl_file_stat.stat.islnk is defined
     - sysctl_file_stat.stat.islnk
@@ -52,7 +52,7 @@
 
 - name: Enable ip forwarding
   sysctl:
-    sysctl_file: "{{sysctl_file_path}}"
+    sysctl_file: "{{ sysctl_file_path }}"
     name: net.ipv4.ip_forward
     value: 1
     state: present
diff --git a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
index 02fc3c420..1298b7852 100644
--- a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
+++ b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
@@ -5,7 +5,7 @@
     block: |-
       {% for item in (groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]))|unique -%}
       {% if 'access_ip' in hostvars[item] or 'ip' in hostvars[item] or fallback_ips[item] != "skip" -%}
-      {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item]))}}
+      {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}
       {%- if ('ansible_hostname' in hostvars[item] and item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] }}{% endif %} {{ item }} {{ item }}.{{ dns_domain }}
       {% endif %}
       {% endfor %}
diff --git a/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
index 9165c09f8..52ffb8b86 100644
--- a/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
+++ b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
@@ -5,7 +5,7 @@
       {% for item in [ supersede_domain, supersede_search, supersede_nameserver ] -%}
       {{ item }}
       {% endfor %}
-    path: "{{dhclientconffile}}"
+    path: "{{ dhclientconffile }}"
     create: yes
     state: present
     insertbefore: BOF
diff --git a/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml b/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml
index a184ddabc..cf935a363 100644
--- a/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml
+++ b/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml
@@ -5,7 +5,7 @@
 
 - name: Remove kubespray specific config from dhclient config
   blockinfile:
-    path: "{{dhclientconffile}}"
+    path: "{{ dhclientconffile }}"
     state: absent
     backup: yes
     marker: "# Ansible entries {mark}"
diff --git a/roles/kubernetes/tokens/tasks/check-tokens.yml b/roles/kubernetes/tokens/tasks/check-tokens.yml
index 0f0c95b48..5d2792873 100644
--- a/roles/kubernetes/tokens/tasks/check-tokens.yml
+++ b/roles/kubernetes/tokens/tasks/check-tokens.yml
@@ -2,7 +2,7 @@
 - name: "Check_tokens | check if the tokens have already been generated on first master"
   stat:
     path: "{{ kube_token_dir }}/known_tokens.csv"
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   register: known_tokens_master
   run_once: true
 
diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml
index 660b7367a..9507a9323 100644
--- a/roles/kubernetes/tokens/tasks/gen_tokens.yml
+++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml
@@ -5,7 +5,7 @@
     dest: "{{ kube_script_dir }}/kube-gen-token.sh"
     mode: 0700
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   when: gen_tokens|default(false)
 
 - name: Gen_tokens | generate tokens for master components
@@ -18,7 +18,7 @@
   register: gentoken_master
   changed_when: "'Added' in gentoken_master.stdout"
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   when: gen_tokens|default(false)
 
 - name: Gen_tokens | generate tokens for node components
@@ -31,14 +31,14 @@
   register: gentoken_node
   changed_when: "'Added' in gentoken_node.stdout"
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   when: gen_tokens|default(false)
 
 - name: Gen_tokens | Get list of tokens from first master
   shell: "(find {{ kube_token_dir }} -maxdepth 1 -type f)"
   register: tokens_list
   check_mode: no
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true
   when: sync_tokens|default(false)
 
@@ -48,7 +48,7 @@
     warn: false
   register: tokens_data
   check_mode: no
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true
   when: sync_tokens|default(false)
 
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 5cdd143e3..c0401cd24 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -376,7 +376,7 @@ contiv_global_neighbor_as: "500"
 fallback_ips_base: |
   ---
   {% for item in groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([])|unique %}
-  {{item}}: "{{ hostvars[item].get('ansible_default_ipv4', {'address': '127.0.0.1'})['address'] }}"
+  {{ item }}: "{{ hostvars[item].get('ansible_default_ipv4', {'address': '127.0.0.1'})['address'] }}"
   {% endfor %}
 fallback_ips: "{{ fallback_ips_base | from_yaml }}"
 
diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml
index d7f02588c..41e8c85da 100644
--- a/roles/network_plugin/calico/rr/tasks/main.yml
+++ b/roles/network_plugin/calico/rr/tasks/main.yml
@@ -61,7 +61,7 @@
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}-key.pem"
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - calico_version is version("v3.0.0", ">=")
 
@@ -79,7 +79,7 @@
     ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}-key.pem"
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - calico_version is version("v3.0.0", "<")
 
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml
index 321669add..b4923cec8 100644
--- a/roles/network_plugin/calico/tasks/install.yml
+++ b/roles/network_plugin/calico/tasks/install.yml
@@ -155,7 +155,7 @@
     - calico_version is version('v3.0.0', '>=')
 
 - name: Calico | Set global as_num (legacy)
-  command: "{{ bin_dir}}/calicoctl.sh config set asNumber {{ global_as_num }}"
+  command: "{{ bin_dir }}/calicoctl.sh config set asNumber {{ global_as_num }}"
   run_once: true
   when:
     - calico_version is version('v3.0.0', '<')
@@ -301,7 +301,7 @@
       "name": "{{ inventory_hostname }}-{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(fallback_ips[item]) }}"
    },
    "spec": {
-      "asNumber": "{{ local_as | default(global_as_num)}}",
+      "asNumber": "{{ local_as | default(global_as_num) }}",
       "node": "{{ inventory_hostname }}",
       "peerIP": "{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(fallback_ips[item]) }}"
    }}' | {{ bin_dir }}/calicoctl.sh create --skip-exists -f -
@@ -319,7 +319,7 @@
   shell: >
    echo '{
    "kind": "bgpPeer",
-   "spec": {"asNumber": "{{ local_as | default(global_as_num)}}"},
+   "spec": {"asNumber": "{{ local_as | default(global_as_num) }}"},
    "apiVersion": "v1",
    "metadata": {"node": "{{ inventory_hostname }}",
      "scope": "node",
@@ -338,8 +338,8 @@
 
 - name: Calico | Create calico manifests
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: calico-config, file: calico-config.yml, type: cm}
     - {name: calico-node, file: calico-node.yml, type: ds}
@@ -353,8 +353,8 @@
 
 - name: Calico | Create calico manifests for kdd
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: calico, file: kdd-crds.yml, type: kdd}
   register: calico_node_kdd_manifest
@@ -364,8 +364,8 @@
 
 - name: Calico | Create calico manifests for typha
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: calico, file: calico-typha.yml, type: typha}
   register: calico_node_typha_manifest
diff --git a/roles/network_plugin/calico/tasks/upgrade.yml b/roles/network_plugin/calico/tasks/upgrade.yml
index 9754d058e..a4b7cffd6 100644
--- a/roles/network_plugin/calico/tasks/upgrade.yml
+++ b/roles/network_plugin/calico/tasks/upgrade.yml
@@ -7,7 +7,7 @@
     owner: root
     group: root
     force: yes
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 - name: "Create etcdv2 and etcdv3 calicoApiConfig"
   template:
     src: "{{ item }}-store.yml.j2"
diff --git a/roles/network_plugin/canal/tasks/main.yml b/roles/network_plugin/canal/tasks/main.yml
index acf0d3567..3de079b5f 100644
--- a/roles/network_plugin/canal/tasks/main.yml
+++ b/roles/network_plugin/canal/tasks/main.yml
@@ -31,7 +31,7 @@
     '{ "Network": "{{ kube_pods_subnet }}", "SubnetLen": {{ kube_network_node_prefix }}, "Backend": { "Type": "{{ flannel_backend_type }}" } }'
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   changed_when: false
   run_once: true
   environment:
@@ -40,8 +40,8 @@
 
 - name: Canal | Create canal node manifests
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: canal-config, file: canal-config.yaml, type: cm}
     - {name: canal-node, file: canal-node.yaml, type: ds}
diff --git a/roles/network_plugin/cilium/tasks/main.yml b/roles/network_plugin/cilium/tasks/main.yml
index 44ab4ae57..e830818e9 100755
--- a/roles/network_plugin/cilium/tasks/main.yml
+++ b/roles/network_plugin/cilium/tasks/main.yml
@@ -27,8 +27,8 @@
 
 - name: Cilium | Create Cilium node manifests
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: cilium, file: cilium-config.yml, type: cm}
     - {name: cilium, file: cilium-crb.yml, type: clusterrolebinding}
diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml
index 0124fe237..d5a725baf 100644
--- a/roles/network_plugin/flannel/tasks/main.yml
+++ b/roles/network_plugin/flannel/tasks/main.yml
@@ -1,8 +1,8 @@
 ---
 - name: Flannel | Create Flannel manifests
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: flannel, file: cni-flannel-rbac.yml, type: sa}
     - {name: kube-flannel, file: cni-flannel.yml, type: ds}
diff --git a/roles/network_plugin/kube-router/tasks/annotate.yml b/roles/network_plugin/kube-router/tasks/annotate.yml
index a6a481e4c..eb70b0fbe 100644
--- a/roles/network_plugin/kube-router/tasks/annotate.yml
+++ b/roles/network_plugin/kube-router/tasks/annotate.yml
@@ -1,21 +1,21 @@
 ---
 - name: kube-router | Add annotations on kube-master
-  command: "{{bin_dir}}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+  command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_master }}"
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   when: kube_router_annotations_master is defined and inventory_hostname in groups['kube-master']
 
 - name: kube-router | Add annotations on kube-node
-  command: "{{bin_dir}}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+  command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_node }}"
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   when: kube_router_annotations_node is defined and inventory_hostname in groups['kube-node']
 
 - name: kube-router | Add common annotations on all servers
-  command: "{{bin_dir}}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+  command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_all }}"
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   when: kube_router_annotations_all is defined and inventory_hostname in groups['all']
\ No newline at end of file
diff --git a/roles/recover_control_plane/etcd/tasks/prepare.yml b/roles/recover_control_plane/etcd/tasks/prepare.yml
index 0f00f0338..d3cacb934 100644
--- a/roles/recover_control_plane/etcd/tasks/prepare.yml
+++ b/roles/recover_control_plane/etcd/tasks/prepare.yml
@@ -32,7 +32,7 @@
     - old_etcd_members is defined
 
 - name: Remove old cluster members
-  shell: "{{ bin_dir}}/etcdctl --endpoints={{ etcd_access_addresses }} member remove {{ item[1].replace(' ','').split(',')[0] }}"
+  shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} member remove {{ item[1].replace(' ','').split(',')[0] }}"
   environment:
     - ETCDCTL_API: 3
     - ETCDCTL_CA_FILE: /etc/ssl/etcd/ssl/ca.pem
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index b820bff09..530cb29df 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: Delete node
-  command: "{{ bin_dir}}/kubectl delete node {{ item }}"
+  command: "{{ bin_dir }}/kubectl delete node {{ item }}"
   with_items:
     - "{{ node.split(',') | default(groups['kube-node']) }}"
   delegate_to: "{{ groups['kube-master']|first }}"
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 8cd3ef23a..56039fb0d 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -118,7 +118,7 @@
     - mounts
 
 - name: reset | unmount kubelet dirs
-  command: umount -f {{item}}
+  command: umount -f {{ item }}
   with_items: '{{ mounted_dirs.stdout_lines }}'
   register: umount_dir
   retries: 4
@@ -170,7 +170,7 @@
     path: "{{ item }}"
     state: absent
   with_items:
-    - "{{kube_config_dir}}"
+    - "{{ kube_config_dir }}"
     - /var/lib/kubelet
     - /root/.kube
     - /root/.helm
diff --git a/roles/win_nodes/kubernetes_patch/tasks/main.yml b/roles/win_nodes/kubernetes_patch/tasks/main.yml
index b2a3ad897..e81e5c79f 100644
--- a/roles/win_nodes/kubernetes_patch/tasks/main.yml
+++ b/roles/win_nodes/kubernetes_patch/tasks/main.yml
@@ -16,11 +16,11 @@
 
     # Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch"
     - name: Check current nodeselector for kube-proxy daemonset
-      shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta.kubernetes.io/os}'"
+      shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta.kubernetes.io/os}'"
       register: current_kube_proxy_state
 
     - name: Apply nodeselector patch for kube-proxy daemonset
-      shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf patch ds kube-proxy --namespace=kube-system --type=strategic -p \"$(cat nodeselector-os-linux-patch.json)\""
+      shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf patch ds kube-proxy --namespace=kube-system --type=strategic -p \"$(cat nodeselector-os-linux-patch.json)\""
       args:
         chdir: "{{ kubernetes_user_manifests_path }}"
       register: patch_kube_proxy_state
diff --git a/scale.yml b/scale.yml
index 723debbb3..c7f3dfd31 100644
--- a/scale.yml
+++ b/scale.yml
@@ -53,4 +53,4 @@
     - { role: kubernetes/node, tags: node }
     - { role: kubernetes/kubeadm, tags: kubeadm }
     - { role: network_plugin, tags: network }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index db577161b..9ba68c0e1 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -32,13 +32,13 @@
       - name: etcd_info
         cmd: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses | default('http://127.0.0.1:2379') }} cluster-health"
       - name: calico_info
-        cmd: "{{bin_dir}}/calicoctl node status"
+        cmd: "{{ bin_dir }}/calicoctl node status"
         when: '{{ kube_network_plugin == "calico" }}'
       - name: calico_workload_info
-        cmd: "{{bin_dir}}/calicoctl get workloadEndpoint -o wide"
+        cmd: "{{ bin_dir }}/calicoctl get workloadEndpoint -o wide"
         when: '{{ kube_network_plugin == "calico" }}'
       - name: calico_pool_info
-        cmd: "{{bin_dir}}/calicoctl get ippool -o wide"
+        cmd: "{{ bin_dir }}/calicoctl get ippool -o wide"
         when: '{{ kube_network_plugin == "calico" }}'
       - name: weave_info
         cmd: weave report
@@ -111,19 +111,19 @@
     - name: Storing commands output
       shell: "{{ item.cmd }} 2>&1 | tee {{ item.name }}"
       failed_when: false
-      with_items: "{{commands}}"
+      with_items: "{{ commands }}"
       when: item.when | default(True)
       no_log: True
 
     - name: Fetch results
       fetch: src={{ item.name }} dest=/tmp/{{ archive_dirname }}/commands
-      with_items: "{{commands}}"
+      with_items: "{{ commands }}"
       when: item.when | default(True)
       failed_when: false
 
     - name: Fetch logs
       fetch: src={{ item }} dest=/tmp/{{ archive_dirname }}/logs
-      with_items: "{{logs}}"
+      with_items: "{{ logs }}"
       failed_when: false
 
     - name: Pack results and logs
@@ -137,4 +137,4 @@
 
     - name: Clean up collected command outputs
       file: path={{ item.name }} state=absent
-      with_items: "{{commands}}"
+      with_items: "{{ commands }}"
diff --git a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
index a09960d96..270a39f7b 100644
--- a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
+++ b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
@@ -40,7 +40,7 @@
     dest: "{{ images_dir }}/Dockerfile"
 
 - name: Create docker images for each OS
-  command: docker build -t {{registry}}/vm-{{ item.key }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
+  command: docker build -t {{ registry }}/vm-{{ item.key }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
   with_dict:
     - "{{ images }}"
 
diff --git a/tests/cloud_playbooks/create-aws.yml b/tests/cloud_playbooks/create-aws.yml
index 7dbf3d6c3..dcc51bdf7 100644
--- a/tests/cloud_playbooks/create-aws.yml
+++ b/tests/cloud_playbooks/create-aws.yml
@@ -10,8 +10,8 @@
       aws_access_key: "{{ aws.access_key }}"
       aws_secret_key: "{{ aws.secret_key }}"
       region: "{{ aws.region }}"
-      group_id: "{{ aws.group}}"
-      instance_type: "{{ aws.instance_type}}"
+      group_id: "{{ aws.group }}"
+      instance_type: "{{ aws.instance_type }}"
       image: "{{ aws.ami_id }}"
       wait: true
       count: "{{ aws.count }}"
@@ -30,4 +30,4 @@
       timeout: 300
       state: started
     delegate_to: localhost
-    with_items: "{{ec2.instances}}"
+    with_items: "{{ ec2.instances }}"
diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml
index 86e97f1bb..02fab16e4 100644
--- a/tests/cloud_playbooks/create-do.yml
+++ b/tests/cloud_playbooks/create-do.yml
@@ -52,20 +52,20 @@
   tasks:
     - name: replace_test_id
       set_fact:
-        test_name: "{{test_id |regex_replace('\\.', '-')}}"
+        test_name: "{{ test_id |regex_replace('\\.', '-') }}"
 
     - name: show vars
-      debug: msg="{{cloud_region}}, {{cloud_image}}"
+      debug: msg="{{ cloud_region }}, {{ cloud_image }}"
 
     - set_fact:
         instance_names: >-
           {%- if mode in ['separate', 'ha'] -%}
-          ["k8s-{{test_name}}-1", "k8s-{{test_name}}-2", "k8s-{{test_name}}-3"]
+          ["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2", "k8s-{{ test_name }}-3"]
           {%- else -%}
-          ["k8s-{{test_name}}-1", "k8s-{{test_name}}-2"]
+          ["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2"]
           {%- endif -%}
 
-    - name: Manage DO instances | {{state}}
+    - name: Manage DO instances | {{ state }}
       digital_ocean:
         unique_name: yes
         api_token: "{{ lookup('env','DO_API_TOKEN') }}"
@@ -73,16 +73,16 @@
         image_id: "{{ cloud_image }}"
         name: "{{ item }}"
         private_networking: no
-        region_id: "{{cloud_region}}"
-        size_id: "{{cloud_machine_type}}"
-        ssh_key_ids: "{{ssh_key_id}}"
-        state: "{{state}}"
+        region_id: "{{ cloud_region }}"
+        size_id: "{{ cloud_machine_type }}"
+        ssh_key_ids: "{{ ssh_key_id }}"
+        state: "{{ state }}"
         wait: yes
       register: droplets
-      with_items: "{{instance_names}}"
+      with_items: "{{ instance_names }}"
 
     - debug:
-        msg: "{{droplets}}, {{inventory_path}}"
+        msg: "{{ droplets }}, {{ inventory_path }}"
       when: state == 'present'
 
     - name: Template the inventory
@@ -92,6 +92,6 @@
       when: state == 'present'
 
     - name: Wait for SSH to come up
-      wait_for: host={{item.droplet.ip_address}} port=22 delay=10 timeout=180 state=started
-      with_items: "{{droplets.results}}"
+      wait_for: host={{ item.droplet.ip_address }} port=22 delay=10 timeout=180 state=started
+      with_items: "{{ droplets.results }}"
       when: state == 'present'
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index 3e7093bb9..7f2de0dd6 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -14,39 +14,39 @@
 
     - name: replace_test_id
       set_fact:
-        test_name: "{{test_id |regex_replace('\\.', '-')}}"
+        test_name: "{{ test_id |regex_replace('\\.', '-') }}"
 
     - set_fact:
         instance_names: >-
           {%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale'] -%}
-          k8s-{{test_name}}-1,k8s-{{test_name}}-2,k8s-{{test_name}}-3
+          k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3
           {%- elif mode == 'aio' -%}
-          k8s-{{test_name}}-1
+          k8s-{{ test_name }}-1
           {%- else -%}
-          k8s-{{test_name}}-1,k8s-{{test_name}}-2
+          k8s-{{ test_name }}-1,k8s-{{ test_name }}-2
           {%- endif -%}
 
     - name: Create gce instances
       gce:
-        instance_names: "{{instance_names}}"
+        instance_names: "{{ instance_names }}"
         machine_type: "{{ cloud_machine_type }}"
         image: "{{ cloud_image | default(omit) }}"
         image_family: "{{ cloud_image_family | default(omit) }}"
         preemptible: "{{ preemptible }}"
         service_account_email: "{{ gce_service_account_email }}"
-        pem_file: "{{ gce_pem_file | default(omit)}}"
-        credentials_file: "{{gce_credentials_file | default(omit)}}"
+        pem_file: "{{ gce_pem_file | default(omit) }}"
+        credentials_file: "{{ gce_credentials_file | default(omit) }}"
         project_id: "{{ gce_project_id }}"
-        zone: "{{cloud_region}}"
-        metadata: '{"test_id": "{{test_id}}", "network": "{{kube_network_plugin}}", "startup-script": "{{startup_script|default("")}}"}'
-        tags: "build-{{test_name}},{{kube_network_plugin}}"
+        zone: "{{ cloud_region }}"
+        metadata: '{"test_id": "{{ test_id }}", "network": "{{ kube_network_plugin }}", "startup-script": "{{ startup_script|default("") }}"}'
+        tags: "build-{{ test_name }},{{ kube_network_plugin }}"
         ip_forward: yes
         service_account_permissions: ['compute-rw']
       register: gce
 
     - name: Add instances to host group
-      add_host: hostname={{item.public_ip}} groupname="waitfor_hosts"
-      with_items: '{{gce.instance_data}}'
+      add_host: hostname={{ item.public_ip }} groupname="waitfor_hosts"
+      with_items: '{{ gce.instance_data }}'
 
     - name: Template the inventory
       template:
diff --git a/tests/cloud_playbooks/delete-gce.yml b/tests/cloud_playbooks/delete-gce.yml
index 53d0164c1..a5b4a6e4d 100644
--- a/tests/cloud_playbooks/delete-gce.yml
+++ b/tests/cloud_playbooks/delete-gce.yml
@@ -8,25 +8,25 @@
   tasks:
     - name: replace_test_id
       set_fact:
-        test_name: "{{test_id |regex_replace('\\.', '-')}}"
+        test_name: "{{ test_id |regex_replace('\\.', '-') }}"
 
     - set_fact:
         instance_names: >-
           {%- if mode in ['separate', 'ha'] -%}
-          k8s-{{test_name}}-1,k8s-{{test_name}}-2,k8s-{{test_name}}-3
+          k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3
           {%- else -%}
-          k8s-{{test_name}}-1,k8s-{{test_name}}-2
+          k8s-{{ test_name }}-1,k8s-{{ test_name }}-2
           {%- endif -%}
 
     - name: stop gce instances
       gce:
-        instance_names: "{{instance_names}}"
+        instance_names: "{{ instance_names }}"
         image: "{{ cloud_image | default(omit) }}"
         service_account_email: "{{ gce_service_account_email }}"
-        pem_file: "{{ gce_pem_file | default(omit)}}"
-        credentials_file: "{{gce_credentials_file | default(omit)}}"
+        pem_file: "{{ gce_pem_file | default(omit) }}"
+        credentials_file: "{{ gce_credentials_file | default(omit) }}"
         project_id: "{{ gce_project_id }}"
-        zone: "{{cloud_region | default('europe-west1-b')}}"
+        zone: "{{ cloud_region | default('europe-west1-b') }}"
         state: 'stopped'
       async: 120
       poll: 3
@@ -35,13 +35,13 @@
 
     - name: delete gce instances
       gce:
-        instance_names: "{{instance_names}}"
+        instance_names: "{{ instance_names }}"
         image: "{{ cloud_image | default(omit) }}"
         service_account_email: "{{ gce_service_account_email }}"
-        pem_file: "{{ gce_pem_file | default(omit)}}"
-        credentials_file: "{{gce_credentials_file | default(omit)}}"
+        pem_file: "{{ gce_pem_file | default(omit) }}"
+        credentials_file: "{{ gce_credentials_file | default(omit) }}"
         project_id: "{{ gce_project_id }}"
-        zone: "{{cloud_region | default('europe-west1-b')}}"
+        zone: "{{ cloud_region | default('europe-west1-b') }}"
         state: 'absent'
       async: 120
       poll: 3
diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml
index d598d6044..39cec6f6a 100644
--- a/tests/cloud_playbooks/upload-logs-gcs.yml
+++ b/tests/cloud_playbooks/upload-logs-gcs.yml
@@ -16,7 +16,7 @@
         test_name: "kargo-ci-{{ out.stdout_lines[0] }}"
 
     - set_fact:
-        file_name: "{{ostype}}-{{kube_network_plugin}}-{{commit}}-logs.tar.gz"
+        file_name: "{{ ostype }}-{{ kube_network_plugin }}-{{ commit }}-logs.tar.gz"
 
     - name: Create a bucket
       gc_storage:
@@ -30,31 +30,31 @@
     - name: Create a lifecycle template for the bucket
       template:
         src: gcs_life.json.j2
-        dest: "{{dir}}/gcs_life.json"
+        dest: "{{ dir }}/gcs_life.json"
 
     - name: Create a boto config to access GCS
       template:
         src: boto.j2
-        dest: "{{dir}}/.boto"
+        dest: "{{ dir }}/.boto"
       no_log: True
 
     - name: Download gsutil cp installer
       get_url:
         url: https://dl.google.com/dl/cloudsdk/channels/rapid/install_google_cloud_sdk.bash
-        dest: "{{dir}}/gcp-installer.sh"
+        dest: "{{ dir }}/gcp-installer.sh"
 
     - name: Get gsutil tool
-      script: "{{dir}}/gcp-installer.sh"
+      script: "{{ dir }}/gcp-installer.sh"
       environment:
         CLOUDSDK_CORE_DISABLE_PROMPTS: 1
-        CLOUDSDK_INSTALL_DIR: "{{dir}}"
+        CLOUDSDK_INSTALL_DIR: "{{ dir }}"
       no_log: True
       failed_when: false
 
     - name: Apply the lifecycle rules
-      command: "{{dir}}/google-cloud-sdk/bin/gsutil lifecycle set {{dir}}/gcs_life.json gs://{{test_name}}"
+      command: "{{ dir }}/google-cloud-sdk/bin/gsutil lifecycle set {{ dir }}/gcs_life.json gs://{{ test_name }}"
       environment:
-        BOTO_CONFIG: "{{dir}}/.boto"
+        BOTO_CONFIG: "{{ dir }}/.boto"
       no_log: True
 
     - name: Upload collected diagnostic info
@@ -63,13 +63,13 @@
         mode: put
         permission: public-read
         object: "{{ file_name }}"
-        src: "{{dir}}/logs.tar.gz"
+        src: "{{ dir }}/logs.tar.gz"
         headers: '{"Content-Encoding": "x-gzip"}'
         gs_access_key: "{{ gs_key }}"
         gs_secret_key: "{{ gs_skey }}"
-        expiration: "{{expire_days * 36000|int}}"
+        expiration: "{{ (expire_days | int) * 36000 }}"
       failed_when: false
       no_log: True
 
     - debug:
-        msg: "A public url https://storage.googleapis.com/{{test_name}}/{{file_name}}"
+        msg: "A public url https://storage.googleapis.com/{{ test_name }}/{{ file_name }}"
diff --git a/tests/testcases/015_check-pods-running.yml b/tests/testcases/015_check-pods-running.yml
index c1e4a6629..28c5d8016 100644
--- a/tests/testcases/015_check-pods-running.yml
+++ b/tests/testcases/015_check-pods-running.yml
@@ -12,14 +12,14 @@
     when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
   - name: Check kubectl output
-    shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
+    shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
     register: get_pods
     no_log: true
 
-  - debug: msg="{{get_pods.stdout.split('\n')}}"
+  - debug: msg="{{ get_pods.stdout.split('\n') }}"
 
   - name: Check that all pods are running and ready
-    shell: "{{bin_dir}}/kubectl get pods --all-namespaces --no-headers -o yaml"
+    shell: "{{ bin_dir }}/kubectl get pods --all-namespaces --no-headers -o yaml"
     register: run_pods_log
     until:
     # Check that all pods are running
@@ -32,9 +32,9 @@
     no_log: true
 
   - name: Check kubectl output
-    shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
+    shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
     register: get_pods
     no_log: true
 
-  - debug: msg="{{get_pods.stdout.split('\n')}}"
+  - debug: msg="{{ get_pods.stdout.split('\n') }}"
     failed_when: not run_pods_log is success
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index 1aba38688..a88df1052 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -15,13 +15,13 @@
     when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
   - name: Create test namespace
-    shell: "{{bin_dir}}/kubectl create namespace test"
+    shell: "{{ bin_dir }}/kubectl create namespace test"
 
   - name: Run a replica controller composed of 2 pods in test ns
-    shell: "{{bin_dir}}/kubectl run test --image={{test_image_repo}}:{{test_image_tag}} --namespace test --replicas=2 --command -- tail -f /dev/null"
+    shell: "{{ bin_dir }}/kubectl run test --image={{ test_image_repo }}:{{ test_image_tag }} --namespace test --replicas=2 --command -- tail -f /dev/null"
 
   - name: Check that all pods are running and ready
-    shell: "{{bin_dir}}/kubectl get pods --namespace test --no-headers -o yaml"
+    shell: "{{ bin_dir }}/kubectl get pods --namespace test --no-headers -o yaml"
     register: run_pods_log
     until:
     # Check that all pods are running
@@ -34,31 +34,31 @@
     no_log: true
 
   - name: Get pod names
-    shell: "{{bin_dir}}/kubectl get pods -n test -o json"
+    shell: "{{ bin_dir }}/kubectl get pods -n test -o json"
     register: pods
     no_log: true
 
-  - debug: msg="{{pods.stdout.split('\n')}}"
+  - debug: msg="{{ pods.stdout.split('\n') }}"
     failed_when: not run_pods_log is success
 
   - name: Get hostnet pods
-    command: "{{bin_dir}}/kubectl get pods -n test -o
+    command: "{{ bin_dir }}/kubectl get pods -n test -o
              jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
     register: hostnet_pods
     no_log: true
 
   - name: Get running pods
-    command: "{{bin_dir}}/kubectl get pods -n test -o
+    command: "{{ bin_dir }}/kubectl get pods -n test -o
              jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
     register: running_pods
     no_log: true
 
   - name: Check kubectl output
-    shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
+    shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
     register: get_pods
     no_log: true
 
-  - debug: msg="{{get_pods.stdout.split('\n')}}"
+  - debug: msg="{{ get_pods.stdout.split('\n') }}"
 
   - set_fact:
       kube_pods_subnet: 10.233.64.0/18
@@ -66,30 +66,30 @@
       pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute = 'status.podIP') | list }}"
       pods_hostnet: |
         {% set list = hostnet_pods.stdout.split(" ") %}
-        {{list}}
+        {{ list }}
       pods_running: |
         {% set list = running_pods.stdout.split(" ") %}
-        {{list}}
+        {{ list }}
 
   - name: Check pods IP are in correct network
     assert:
       that: item | ipaddr(kube_pods_subnet)
     when: not item in pods_hostnet and item in pods_running
-    with_items: "{{pod_ips}}"
+    with_items: "{{ pod_ips }}"
 
   - name: Ping between pods is working
-    shell: "{{bin_dir}}/kubectl -n test exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
+    shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
     when: not item[0] in pods_hostnet and not item[1] in pods_hostnet
     with_nested:
-    - "{{pod_names}}"
-    - "{{pod_ips}}"
+    - "{{ pod_names }}"
+    - "{{ pod_ips }}"
 
   - name: Ping between hostnet pods is working
-    shell: "{{bin_dir}}/kubectl -n test exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
+    shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
     when: item[0] in pods_hostnet and item[1] in pods_hostnet
     with_nested:
-    - "{{pod_names}}"
-    - "{{pod_ips}}"
+    - "{{ pod_names }}"
+    - "{{ pod_ips }}"
 
   - name: Delete test namespace
-    shell: "{{bin_dir}}/kubectl delete namespace test"
+    shell: "{{ bin_dir }}/kubectl delete namespace test"
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index 8b85760f8..c1264f842 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -24,8 +24,8 @@
       when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
     - name: Wait for netchecker server
-      shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{netcheck_namespace}} | grep ^netchecker-server"
-      delegate_to: "{{groups['kube-master'][0]}}"
+      shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep ^netchecker-server"
+      delegate_to: "{{ groups['kube-master'][0] }}"
       run_once: true
       register: ncs_pod
       until: ncs_pod.stdout.find('Running') != -1
@@ -33,18 +33,18 @@
       delay: 10
 
     - name: Wait for netchecker agents
-      shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{netcheck_namespace}} | grep '^netchecker-agent-.*Running'"
+      shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep '^netchecker-agent-.*Running'"
       run_once: true
-      delegate_to: "{{groups['kube-master'][0]}}"
+      delegate_to: "{{ groups['kube-master'][0] }}"
       register: nca_pod
       until: nca_pod.stdout_lines|length >= groups['k8s-cluster']|intersect(ansible_play_hosts)|length * 2
       retries: 3
       delay: 10
       failed_when: false
 
-    - command: "{{ bin_dir }}/kubectl -n {{netcheck_namespace}} describe pod -l app={{ item }}"
+    - command: "{{ bin_dir }}/kubectl -n {{ netcheck_namespace }} describe pod -l app={{ item }}"
       run_once: true
-      delegate_to: "{{groups['kube-master'][0]}}"
+      delegate_to: "{{ groups['kube-master'][0] }}"
       no_log: false
       with_items:
         - netchecker-agent
@@ -56,9 +56,9 @@
       run_once: true
 
     - name: Get netchecker agents
-      uri: url=http://{{ ansible_default_ipv4.address }}:{{netchecker_port}}/api/v1/agents/ return_content=yes
+      uri: url=http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/ return_content=yes
       run_once: true
-      delegate_to: "{{groups['kube-master'][0]}}"
+      delegate_to: "{{ groups['kube-master'][0] }}"
       register: agents
       retries: 18
       delay: "{{ agent_report_interval }}"
@@ -77,8 +77,8 @@
         - agents.content[0] == '{'
 
     - name: Check netchecker status
-      uri: url=http://{{ ansible_default_ipv4.address }}:{{netchecker_port}}/api/v1/connectivity_check status_code=200 return_content=yes
-      delegate_to: "{{groups['kube-master'][0]}}"
+      uri: url=http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check status_code=200 return_content=yes
+      delegate_to: "{{ groups['kube-master'][0] }}"
       run_once: true
       register: result
       retries: 3
@@ -97,13 +97,13 @@
     - command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy"
       run_once: true
       when: not result is success
-      delegate_to: "{{groups['kube-master'][0]}}"
+      delegate_to: "{{ groups['kube-master'][0] }}"
       no_log: false
 
-    - command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{item}} --all-containers"
+    - command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers"
       run_once: true
       when: not result is success
-      delegate_to: "{{groups['kube-master'][0]}}"
+      delegate_to: "{{ groups['kube-master'][0] }}"
       no_log: false
       with_items:
         - kube-router
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index 4cdbaeb72..5ea8da37d 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -38,7 +38,7 @@
   pre_tasks:
     - name: gather facts from all instances
       setup:
-      delegate_to: "{{item}}"
+      delegate_to: "{{ item }}"
       delegate_facts: True
       with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
 
@@ -50,7 +50,7 @@
     - { role: kubernetes/preinstall, tags: preinstall }
     - { role: container-engine, tags: "container-engine", when: deploy_container_engine|default(true) }
     - { role: download, tags: download, when: "not skip_downloads" }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - hosts: etcd
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -76,7 +76,7 @@
     - { role: kubernetes/client, tags: client }
     - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - name: Upgrade calico on all masters and nodes
   hosts: kube-master:kube-node
@@ -98,7 +98,7 @@
     - { role: kubernetes/node, tags: node }
     - { role: kubernetes/kubeadm, tags: kubeadm }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - hosts: kube-master[0]
   any_errors_fatal: true
@@ -112,14 +112,14 @@
   roles:
     - { role: kubespray-defaults}
     - { role: network_plugin/calico/rr, tags: network }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes-apps, tags: apps }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - hosts: k8s-cluster
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-- 
GitLab