diff --git a/.ansible-lint b/.ansible-lint
index 2bd18414cdfb7c196326fa2cef15228a3717e577..c44f782b662f3e427bcafb6c88b115f2c29bebed 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -5,7 +5,6 @@ skip_list:
   # The following rules throw errors.
   # These either still need to be corrected in the repository and the rules re-enabled or they are skipped on purpose.
   - '204'
-  - '206'
   - '301'
   - '305'
   - '306'
diff --git a/cluster.yml b/cluster.yml
index cc48fe45965494a7da10379bf4d9eabc09a5fd22..1ee5fc2b74e165d6c04709bcf1be46aac025b831 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -34,7 +34,7 @@
   pre_tasks:
     - name: gather facts from all instances
       setup:
-      delegate_to: "{{item}}"
+      delegate_to: "{{ item }}"
       delegate_facts: true
       with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
       run_once: true
@@ -46,7 +46,7 @@
     - { role: kubernetes/preinstall, tags: preinstall }
     - { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
     - { role: download, tags: download, when: "not skip_downloads" }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - hosts: etcd
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -65,7 +65,7 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes/node, tags: node }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -109,7 +109,7 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes-apps, tags: apps }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - hosts: k8s-cluster
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
diff --git a/contrib/azurerm/roles/generate-inventory/tasks/main.yml b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
index b1e5c0ccc855404cb5738f4c787603d27f1a10a3..409555fd00fe78c40212800cffb1b8d47da0cb6d 100644
--- a/contrib/azurerm/roles/generate-inventory/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory/tasks/main.yml
@@ -8,4 +8,6 @@
     vm_list: "{{ vm_list_cmd.stdout }}"
 
 - name: Generate inventory
-  template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
+  template:
+    src: inventory.j2
+    dest: "{{ playbook_dir }}/inventory"
diff --git a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
index e7802b3a170afb351e4ae2121b9cf5b1a411f7be..1772b1c29298dfcc7886e74fd6b33fa8453616bf 100644
--- a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
@@ -13,4 +13,6 @@
     vm_roles_list: "{{ vm_list_cmd.stdout }}"
 
 - name: Generate inventory
-  template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
+  template:
+    src: inventory.j2
+    dest: "{{ playbook_dir }}/inventory"
diff --git a/contrib/azurerm/roles/generate-templates/tasks/main.yml b/contrib/azurerm/roles/generate-templates/tasks/main.yml
index 4ee6d858c4d0fd121380fb941b46d2d1733bb7ac..92a0e87c9f8a32c98aed1e435d62627dd15feaf1 100644
--- a/contrib/azurerm/roles/generate-templates/tasks/main.yml
+++ b/contrib/azurerm/roles/generate-templates/tasks/main.yml
@@ -1,10 +1,15 @@
 ---
 - set_fact:
-    base_dir: "{{playbook_dir}}/.generated/"
+    base_dir: "{{ playbook_dir }}/.generated/"
 
-- file: path={{base_dir}} state=directory recurse=true
+- file:
+    path: "{{ base_dir }}"
+    state: directory
+    recurse: true
 
-- template: src={{item}} dest="{{base_dir}}/{{item}}"
+- template:
+    src: "{{ item }}"
+    dest: "{{ base_dir }}/{{ item }}"
   with_items:
     - network.json
     - storage.json
diff --git a/contrib/dind/roles/dind-cluster/tasks/main.yaml b/contrib/dind/roles/dind-cluster/tasks/main.yaml
index affc99ea1818d94487e1092322243a9b5d62bec1..5b7c77e497c287fda5f48b5ff350e999f9a175fd 100644
--- a/contrib/dind/roles/dind-cluster/tasks/main.yaml
+++ b/contrib/dind/roles/dind-cluster/tasks/main.yaml
@@ -12,7 +12,7 @@
 - name: Null-ify some linux tools to ease DIND
   file:
     src: "/bin/true"
-    dest: "{{item}}"
+    dest: "{{ item }}"
     state: link
     force: yes
   with_items:
@@ -52,7 +52,7 @@
     - rsyslog
     - "{{ distro_ssh_service }}"
 
-- name: Create distro user "{{distro_user}}"
+- name: Create distro user "{{ distro_user }}"
   user:
     name: "{{ distro_user }}"
     uid: 1000
diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml
index d125414c13660d05e73fe102d68174415a71e25c..40ca53cd6d9607113ee59f5cd8e1fc966d63d73d 100644
--- a/contrib/dind/roles/dind-host/tasks/main.yaml
+++ b/contrib/dind/roles/dind-host/tasks/main.yaml
@@ -28,7 +28,7 @@
       - /lib/modules:/lib/modules
       - "{{ item }}:/dind/docker"
   register: containers
-  with_items: "{{groups.containers}}"
+  with_items: "{{ groups.containers }}"
   tags:
     - addresses
 
diff --git a/contrib/metallb/roles/provision/tasks/main.yml b/contrib/metallb/roles/provision/tasks/main.yml
index 6b9661de41eb4b29cf33e91d872adb611f1535da..66fcc591c41d2e86d5563c7c31e6eca9151eccb5 100644
--- a/contrib/metallb/roles/provision/tasks/main.yml
+++ b/contrib/metallb/roles/provision/tasks/main.yml
@@ -9,7 +9,7 @@
 - name: "Kubernetes Apps | Install and configure MetalLB"
   kube:
     name: "MetalLB"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/{{ item.item }}"
     state: "{{ item.changed | ternary('latest','present') }}"
   become: true
diff --git a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
index 2e108701aaf5fa30a83e2f9cb6837794bbc8a9f7..baf8356b6d8a4e9f8d49b0b89aa8e9f2f100a03b 100644
--- a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
+++ b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
@@ -1,6 +1,8 @@
 ---
 - name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}}
+  template:
+    src: "{{ item.file }}"
+    dest: "{{ kube_config_dir }}/{{ item.dest }}"
   with_items:
     - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
     - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
@@ -12,9 +14,9 @@
   kube:
     name: glusterfs
     namespace: default
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.dest}}"
-    state: "{{item.changed | ternary('latest','present') }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
+    state: "{{ item.changed | ternary('latest','present') }}"
   with_items: "{{ gluster_pv.results }}"
   when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
index ac5115a00fadef5eecbe0e33b0bdd8f1c15c83e6..93b473295a9634397855c5fa08943b39827ce24d 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
@@ -6,7 +6,7 @@
 - name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-bootstrap.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
 - name: "Wait for heketi bootstrap to complete."
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml
index be3c42cafa710af942c100c3a7ac74f305824c7a..63a475a85cc37861a09d1c676bdbb35982d14af4 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml
@@ -6,7 +6,7 @@
 - name: "Create heketi storage."
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json"
     state: "present"
   vars:
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
index 105d9e2ac26dd8d4395f543a4c359631751c8a0e..5f00e28aa8145ec2afeb4f3380c069df6b2bfff9 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
@@ -6,7 +6,7 @@
 - name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/glusterfs-daemonset.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
 - name: "Kubernetes Apps | Label GlusterFS nodes"
@@ -33,6 +33,6 @@
 - name: "Kubernetes Apps | Install and configure Heketi Service Account"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-service-account.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
index 2052abefcaf5b0f31dc98c453e3e4d2832ed96ed..d322f6ff8dceb5327846625a760908f721817f49 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
@@ -6,7 +6,7 @@
 - name: "Kubernetes Apps | Install and configure Heketi"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-deployment.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
 - name: "Ensure heketi is up and running."
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/main.yml b/contrib/network-storage/heketi/roles/provision/tasks/main.yml
index 23a2b4f9c72899af389d62662e608badb17d33ca..1feb27d7b5de3b5b1243b3a59de0fe606ef0724c 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/main.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/main.yml
@@ -7,7 +7,7 @@
 
 - name: "Kubernetes Apps | Test Heketi"
   register: "heketi_service_state"
-  command: "{{bin_dir}}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
   changed_when: false
 
 - name: "Kubernetes Apps | Bootstrap Heketi"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
index 364bb29b250b4e94012f860dcd89ba1afda50767..96f2430485f36c55aba736594e7f42970f3a0781 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
@@ -1,19 +1,19 @@
 ---
 - register: "clusterrolebinding_state"
-  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
   changed_when: false
 - name: "Kubernetes Apps | Deploy cluster role binding."
   when: "clusterrolebinding_state.stdout == \"\""
-  command: "{{bin_dir}}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
+  command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
 - register: "clusterrolebinding_state"
-  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
   changed_when: false
 - assert:
     that: "clusterrolebinding_state.stdout != \"\""
     msg: "Cluster role binding is not present."
 
 - register: "secret_state"
-  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
   changed_when: false
 - name: "Render Heketi secret configuration."
   become: true
@@ -22,9 +22,9 @@
     dest: "{{ kube_config_dir }}/heketi.json"
 - name: "Deploy Heketi config secret"
   when: "secret_state.stdout == \"\""
-  command: "{{bin_dir}}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
+  command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
 - register: "secret_state"
-  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
   changed_when: false
 - assert:
     that: "secret_state.stdout != \"\""
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storage.yml b/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
index f3861d9ec92752ea9b3d502c70433860f385f766..210930804a5c93d03e3ea4f0718bf5e8911ef143 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
@@ -7,6 +7,6 @@
 - name: "Kubernetes Apps | Install and configure Heketi Storage"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-storage.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
index f878876bc11c5d953879e71b475a843340ce309c..5bf3e3c4d54be6532c3f0c889d6f57223b196d28 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
@@ -20,6 +20,6 @@
 - name: "Kubernetes Apps | Install and configure Storace Class"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/storageclass.yml"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
diff --git a/contrib/vault/roles/vault/tasks/shared/check_etcd.yml b/contrib/vault/roles/vault/tasks/shared/check_etcd.yml
index 9ebed2bf1508a8c702b7a752741160d9c77c17a5..f8599d5367ed4ff9508b6d79c78aff659d1f14d5 100644
--- a/contrib/vault/roles/vault/tasks/shared/check_etcd.yml
+++ b/contrib/vault/roles/vault/tasks/shared/check_etcd.yml
@@ -11,7 +11,7 @@
   until: vault_etcd_health_check.status == 200 or vault_etcd_health_check.status == 401
   retries: 3
   delay: 2
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   run_once: true
   failed_when: false
   register: vault_etcd_health_check
diff --git a/mitogen.yaml b/mitogen.yaml
index fa9d4ec54dc2efe066942505f9598ca55ac081c2..853c39f9c3ac4a88f9f3888185310e42fd1c3553 100644
--- a/mitogen.yaml
+++ b/mitogen.yaml
@@ -3,29 +3,29 @@
   strategy: linear
   vars:
     mitogen_version: master
-    mitogen_url: https://github.com/dw/mitogen/archive/{{mitogen_version}}.zip
+    mitogen_url: https://github.com/dw/mitogen/archive/{{ mitogen_version }}.zip
   tasks:
     - name: Create mitogen plugin dir
       file:
-        path: "{{item}}"
+        path: "{{ item }}"
         state: directory
       become: false
       loop:
-        - "{{playbook_dir}}/plugins/mitogen"
-        - "{{playbook_dir}}/dist"
+        - "{{ playbook_dir }}/plugins/mitogen"
+        - "{{ playbook_dir }}/dist"
 
     - name: download mitogen release
       get_url:
-        url: "{{mitogen_url}}"
-        dest: "{{playbook_dir}}/dist/mitogen_{{mitogen_version}}.zip"
+        url: "{{ mitogen_url }}"
+        dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.zip"
         validate_certs: true
 
     - name: extract zip
       unarchive:
-        src: "{{playbook_dir}}/dist/mitogen_{{mitogen_version}}.zip"
-        dest: "{{playbook_dir}}/dist/"
+        src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.zip"
+        dest: "{{ playbook_dir }}/dist/"
 
     - name: copy plugin
       synchronize:
-        src: "{{playbook_dir}}/dist/mitogen-{{mitogen_version}}/"
-        dest: "{{playbook_dir}}/plugins/mitogen"
+        src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
+        dest: "{{ playbook_dir }}/plugins/mitogen"
diff --git a/roles/adduser/tasks/main.yml b/roles/adduser/tasks/main.yml
index 3854ec4119a194a41c73564c78112a87faa109e7..774eb412b6b1b401f7a9c4f4c13ac0ef454a52b1 100644
--- a/roles/adduser/tasks/main.yml
+++ b/roles/adduser/tasks/main.yml
@@ -1,15 +1,15 @@
 ---
 - name: User | Create User Group
   group:
-    name: "{{user.group|default(user.name)}}"
-    system: "{{user.system|default(omit)}}"
+    name: "{{ user.group|default(user.name) }}"
+    system: "{{ user.system|default(omit) }}"
 
 - name: User | Create User
   user:
-    comment: "{{user.comment|default(omit)}}"
-    createhome: "{{user.createhome|default(omit)}}"
-    group: "{{user.group|default(user.name)}}"
-    home: "{{user.home|default(omit)}}"
-    shell: "{{user.shell|default(omit)}}"
-    name: "{{user.name}}"
-    system: "{{user.system|default(omit)}}"
+    comment: "{{ user.comment|default(omit) }}"
+    createhome: "{{ user.createhome|default(omit) }}"
+    group: "{{ user.group|default(user.name) }}"
+    home: "{{ user.home|default(omit) }}"
+    shell: "{{ user.shell|default(omit) }}"
+    name: "{{ user.name }}"
+    system: "{{ user.system|default(omit) }}"
diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml
index c9f677567831ba8ca77dd3f853393fddea843e78..0b979cc7d3b0c32cb98abafeae08c14b6db2c934 100644
--- a/roles/container-engine/docker/tasks/main.yml
+++ b/roles/container-engine/docker/tasks/main.yml
@@ -54,8 +54,8 @@
 - name: ensure docker-ce repository public key is installed
   action: "{{ docker_repo_key_info.pkg_key }}"
   args:
-    id: "{{item}}"
-    url: "{{docker_repo_key_info.url}}"
+    id: "{{ item }}"
+    url: "{{ docker_repo_key_info.url }}"
     state: present
   register: keyserver_task_result
   until: keyserver_task_result is succeeded
@@ -67,7 +67,7 @@
 - name: ensure docker-ce repository is enabled
   action: "{{ docker_repo_info.pkg_repo }}"
   args:
-    repo: "{{item}}"
+    repo: "{{ item }}"
     state: present
   with_items: "{{ docker_repo_info.repos }}"
   when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse", "ClearLinux"] or is_atomic) and (docker_repo_info.repos|length > 0)
@@ -75,8 +75,8 @@
 - name: ensure docker-engine repository public key is installed
   action: "{{ dockerproject_repo_key_info.pkg_key }}"
   args:
-    id: "{{item}}"
-    url: "{{dockerproject_repo_key_info.url}}"
+    id: "{{ item }}"
+    url: "{{ dockerproject_repo_key_info.url }}"
     state: present
   register: keyserver_task_result
   until: keyserver_task_result is succeeded
@@ -90,7 +90,7 @@
 - name: ensure docker-engine repository is enabled
   action: "{{ dockerproject_repo_info.pkg_repo }}"
   args:
-    repo: "{{item}}"
+    repo: "{{ item }}"
     state: present
   with_items: "{{ dockerproject_repo_info.repos }}"
   when:
@@ -123,7 +123,7 @@
     baseurl: "{{ extras_rh_repo_base_url }}"
     file: "extras"
     gpgcheck: yes
-    gpgkey: "{{extras_rh_repo_gpgkey}}"
+    gpgkey: "{{ extras_rh_repo_gpgkey }}"
     keepcache: "{{ docker_rpm_keepcache | default('1') }}"
     proxy: " {{ http_proxy | default('_none_') }}"
   when:
@@ -148,10 +148,10 @@
 - name: ensure docker packages are installed
   action: "{{ docker_package_info.pkg_mgr }}"
   args:
-    pkg: "{{item.name}}"
-    force: "{{item.force|default(omit)}}"
-    conf_file: "{{item.yum_conf|default(omit)}}"
-    state: "{{item.state | default('present')}}"
+    pkg: "{{ item.name }}"
+    force: "{{ item.force|default(omit) }}"
+    conf_file: "{{ item.yum_conf|default(omit) }}"
+    state: "{{ item.state | default('present') }}"
     update_cache: "{{ omit if ansible_distribution == 'Fedora' else True }}"
   register: docker_task_result
   until: docker_task_result is succeeded
@@ -166,7 +166,7 @@
   action: "{{ docker_package_info.pkg_mgr }}"
   args:
     name: "{{ item.name }}"
-    state: "{{item.state | default('present')}}"
+    state: "{{ item.state | default('present') }}"
   with_items: "{{ docker_package_info.pkgs }}"
   register: docker_task_result
   until: docker_task_result is succeeded
@@ -185,7 +185,7 @@
 
 - name: show available packages on ubuntu
   fail:
-    msg: "{{available_packages}}"
+    msg: "{{ available_packages }}"
   when:
     - docker_task_result is failed
     - ansible_distribution == 'Ubuntu'
diff --git a/roles/container-engine/docker/tasks/set_facts_dns.yml b/roles/container-engine/docker/tasks/set_facts_dns.yml
index 3e621f5246b4b45bf279c6f06f949a0ad16a2f9f..99b9f0e26e1788c14c39b682f635c732f76e5805 100644
--- a/roles/container-engine/docker/tasks/set_facts_dns.yml
+++ b/roles/container-engine/docker/tasks/set_facts_dns.yml
@@ -2,11 +2,11 @@
 
 - name: set dns server for docker
   set_fact:
-    docker_dns_servers: "{{dns_servers}}"
+    docker_dns_servers: "{{ dns_servers }}"
 
 - name: show docker_dns_servers
   debug:
-    msg: "{{docker_dns_servers}}"
+    msg: "{{ docker_dns_servers }}"
 
 - name: set base docker dns facts
   set_fact:
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index a661c0d0003cad151976079cea0baf32b3439196..b8e642bf5733e6c316daa4cb8e5c3c7e7ccd6965 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -29,7 +29,7 @@ download_always_pull: False
 download_validate_certs: True
 
 # Use the first kube-master if download_localhost is not set
-download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}"
+download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube-master'][0] }}{% endif %}"
 
 # Arch of Docker images and needed packages
 image_arch: "{{host_architecture | default('amd64')}}"
diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml
index 3993ef6b646a9085fc9f2305226fa31b13910867..2bfb5f70f8d3a1becd53413e2e85e838e5a1be93 100644
--- a/roles/download/tasks/download_file.yml
+++ b/roles/download/tasks/download_file.yml
@@ -7,7 +7,7 @@
 
 - name: file_download | Create dest directory
   file:
-    path: "{{download.dest|dirname}}"
+    path: "{{ download.dest | dirname }}"
     state: directory
     recurse: yes
   when:
@@ -20,9 +20,9 @@
 #   to one task in the future.
 - name: file_download | Download item (delegate)
   get_url:
-    url: "{{download.url}}"
-    dest: "{{download.dest}}"
-    sha256sum: "{{download.sha256 | default(omit)}}"
+    url: "{{ download.url }}"
+    dest: "{{ download.dest }}"
+    sha256sum: "{{ download.sha256|default(omit) }}"
     owner: "{{ download.owner|default(omit) }}"
     mode: "{{ download.mode|default(omit) }}"
     validate_certs: "{{ download_validate_certs }}"
@@ -43,9 +43,9 @@
 
 - name: file_download | Download item (all)
   get_url:
-    url: "{{download.url}}"
-    dest: "{{download.dest}}"
-    sha256sum: "{{download.sha256 | default(omit)}}"
+    url: "{{ download.url }}"
+    dest: "{{ download.dest }}"
+    sha256sum: "{{ download.sha256|default(omit) }}"
     owner: "{{ download.owner|default(omit) }}"
     mode: "{{ download.mode|default(omit) }}"
     validate_certs: "{{ download_validate_certs }}"
@@ -64,8 +64,8 @@
 
 - name: file_download | Extract archives
   unarchive:
-    src: "{{download.dest}}"
-    dest: "{{download.dest|dirname}}"
+    src: "{{ download.dest }}"
+    dest: "{{ download.dest |dirname }}"
     owner: "{{ download.owner|default(omit) }}"
     mode: "{{ download.mode|default(omit) }}"
     copy: no
diff --git a/roles/download/tasks/download_prep.yml b/roles/download/tasks/download_prep.yml
index 40ee8e981f2b476b1e782d324c8572ae853cf019..6bb48fcbce5d5ea871b10a242f9ea4f5e6b4a642 100644
--- a/roles/download/tasks/download_prep.yml
+++ b/roles/download/tasks/download_prep.yml
@@ -11,16 +11,16 @@
 
 - name: container_download | Create dest directory for saved/loaded container images
   file:
-    path: "{{local_release_dir}}/containers"
+    path: "{{ local_release_dir }}/containers"
     state: directory
     recurse: yes
     mode: 0755
-    owner: "{{ansible_ssh_user|default(ansible_user_id)}}"
+    owner: "{{ ansible_ssh_user|default(ansible_user_id) }}"
   when: download_container
 
 - name: container_download | create local directory for saved/loaded container images
   file:
-    path: "{{local_release_dir}}/containers"
+    path: "{{ local_release_dir }}/containers"
     state: directory
     recurse: yes
   delegate_to: localhost
diff --git a/roles/download/tasks/set_docker_image_facts.yml b/roles/download/tasks/set_docker_image_facts.yml
index 84ed88760897cb78badad7e345c92a27f08cdc3e..6fb00e5c03b7afb63016671518f65a45f58030b2 100644
--- a/roles/download/tasks/set_docker_image_facts.yml
+++ b/roles/download/tasks/set_docker_image_facts.yml
@@ -5,7 +5,7 @@
 
 - set_fact:
     pull_args: >-
-      {%- if pull_by_digest %}{{download.repo}}@sha256:{{download.sha256}}{%- else -%}{{download.repo}}:{{download.tag}}{%- endif -%}
+      {%- if pull_by_digest %}{{ download.repo }}@sha256:{{ download.sha256 }}{%- else -%}{{ download.repo }}:{{ download.tag }}{%- endif -%}
 
 - name: Register docker images info
   shell: >-
@@ -33,7 +33,7 @@
 
 - name: Check the local digest sha256 corresponds to the given image tag
   assert:
-    that: "{{download.repo}}:{{download.tag}} in docker_images.stdout.split(',')"
+    that: "{{ download.repo }}:{{ download.tag }} in docker_images.stdout.split(',')"
   when:
     - not download_always_pull
     - not pull_required
diff --git a/roles/download/tasks/sync_container.yml b/roles/download/tasks/sync_container.yml
index 767469422cefc4b9e12c01273ec01e3ff4282e4e..fd46766ee5c3c705809551f25952da35054e33ed 100644
--- a/roles/download/tasks/sync_container.yml
+++ b/roles/download/tasks/sync_container.yml
@@ -8,7 +8,7 @@
     - facts
 
 - set_fact:
-    fname: "{{local_release_dir}}/containers/{{download.repo|regex_replace('/|\0|:', '_')}}:{{download.tag|default(download.sha256)|regex_replace('/|\0|:', '_')}}.tar"
+    fname: "{{ local_release_dir }}/containers/{{ download.repo|regex_replace('/|\0|:', '_') }}:{{ download.tag|default(download.sha256)|regex_replace('/|\0|:', '_') }}.tar"
   run_once: true
   when:
     - download.enabled
@@ -20,7 +20,7 @@
 
 - name: "container_download | Set default value for 'container_changed' to false"
   set_fact:
-    container_changed: "{{pull_required|default(false)}}"
+    container_changed: "{{ pull_required|default(false) }}"
   when:
     - download.enabled
     - download.container
diff --git a/roles/download/tasks/sync_file.yml b/roles/download/tasks/sync_file.yml
index 530a8237dd85f3581ce024ad2811e244622b17f6..6813b0534c42df5398296f5c2f2f00cb2892738a 100644
--- a/roles/download/tasks/sync_file.yml
+++ b/roles/download/tasks/sync_file.yml
@@ -1,7 +1,7 @@
 ---
 - name: file_download | create local download destination directory
   file:
-    path: "{{download.dest|dirname}}"
+    path: "{{ download.dest|dirname }}"
     state: directory
     recurse: yes
     mode: 0755
diff --git a/roles/etcd/tasks/check_certs.yml b/roles/etcd/tasks/check_certs.yml
index b11a2e9e4a3727032736d32a27d31ffad8b6c5c4..e0ee9f7e9e13156536cd367163e990614097baaf 100644
--- a/roles/etcd/tasks/check_certs.yml
+++ b/roles/etcd/tasks/check_certs.yml
@@ -4,7 +4,7 @@
     paths: "{{ etcd_cert_dir }}"
     patterns: "ca.pem,node*.pem"
     get_checksum: true
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   register: etcdcert_master
   run_once: true
 
@@ -30,10 +30,10 @@
   with_items: "{{ expected_files }}"
   vars:
     expected_files: >-
-       ['{{etcd_cert_dir}}/ca.pem',
+       ['{{ etcd_cert_dir }}/ca.pem',
        {% set all_etcd_hosts = groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort %}
        {% for host in all_etcd_hosts %}
-       '{{etcd_cert_dir}}/node-{{ host }}-key.pem'
+       '{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
        {% if not loop.last %}{{','}}{% endif %}
        {% endfor %}]
 
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index 63208d54a1fc19b62ad34bbfb1a2e8027e9fa5b4..66b2030a5381f45769b88e1800e27e0187219bc7 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -8,7 +8,7 @@
     mode: 0700
     recurse: yes
 
-- name: "Gen_certs | create etcd script dir (on {{groups['etcd'][0]}})"
+- name: "Gen_certs | create etcd script dir (on {{ groups['etcd'][0] }})"
   file:
     path: "{{ etcd_script_dir }}"
     state: directory
@@ -16,9 +16,9 @@
     mode: 0700
   run_once: yes
   when: inventory_hostname == groups['etcd'][0]
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
 
-- name: "Gen_certs | create etcd cert dir (on {{groups['etcd'][0]}})"
+- name: "Gen_certs | create etcd cert dir (on {{ groups['etcd'][0] }})"
   file:
     path: "{{ etcd_cert_dir }}"
     group: "{{ etcd_cert_group }}"
@@ -28,14 +28,14 @@
     mode: 0700
   run_once: yes
   when: inventory_hostname == groups['etcd'][0]
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
 
 - name: Gen_certs | write openssl config
   template:
     src: "openssl.conf.j2"
     dest: "{{ etcd_config_dir }}/openssl.conf"
   run_once: yes
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - gen_certs|default(false)
     - inventory_hostname == groups['etcd'][0]
@@ -46,7 +46,7 @@
     dest: "{{ etcd_script_dir }}/make-ssl-etcd.sh"
     mode: 0700
   run_once: yes
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - gen_certs|default(false)
     - inventory_hostname == groups['etcd'][0]
@@ -65,7 +65,7 @@
                 {% endif %}
               {% endfor %}"
   run_once: yes
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - gen_certs|default(false)
   notify: set etcd_secret_changed
@@ -87,7 +87,7 @@
         '{{ etcd_cert_dir }}/node-{{ node }}.pem',
         '{{ etcd_cert_dir }}/node-{{ node }}-key.pem',
         {% endfor %}]"
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - inventory_hostname in groups['etcd']
     - sync_certs|default(false)
@@ -133,13 +133,13 @@
   no_log: true
   register: etcd_node_certs
   check_mode: no
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or
         inventory_hostname in groups['k8s-cluster']) and
         sync_certs|default(false) and inventory_hostname not in groups['etcd']
 
 - name: Gen_certs | Copy certs on nodes
-  shell: "base64 -d <<< '{{etcd_node_certs.stdout|quote}}' | tar xz -C {{ etcd_cert_dir }}"
+  shell: "base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}"
   args:
     executable: /bin/bash
   no_log: true
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 88f78be00db1e0b30205fc2e2bccfe9362e704bd..c729b880d5e5402ea266dcf82c0360f8b322812e 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -8,9 +8,9 @@
   set_fact:
     host_architecture: >-
       {%- if ansible_architecture in architecture_groups -%}
-      {{architecture_groups[ansible_architecture]}}
+      {{ architecture_groups[ansible_architecture] }}
       {%- else -%}
-       {{ansible_architecture}}
+       {{ ansible_architecture }}
       {% endif %}
 
 - include_tasks: check_certs.yml
diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
index cf115db77c6edf98157e30bbef6662649cb97e5c..d99700dbbe1ef6d74a93dcc9be1b978d6e29ce75 100644
--- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml
+++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
@@ -13,7 +13,7 @@
     name: "netchecker-server"
     namespace: "{{ netcheck_namespace }}"
     filename: "{{ netchecker_server_manifest.stat.path }}"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "deploy"
     state: latest
   when: inventory_hostname == groups['kube-master'][0] and netchecker_server_manifest.stat.exists
@@ -39,13 +39,13 @@
 
 - name: Kubernetes Apps | Append extra templates to Netchecker Templates list for PodSecurityPolicy
   set_fact:
-    netchecker_templates: "{{ netchecker_templates_for_psp + netchecker_templates}}"
+    netchecker_templates: "{{ netchecker_templates_for_psp + netchecker_templates }}"
   when: podsecuritypolicy_enabled
 
 - name: Kubernetes Apps | Lay Down Netchecker Template
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items: "{{ netchecker_templates }}"
   register: manifests
   when:
@@ -53,11 +53,11 @@
 
 - name: Kubernetes Apps | Start Netchecker Resources
   kube:
-    name: "{{item.item.name}}"
-    namespace: "{{netcheck_namespace}}"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    name: "{{ item.item.name }}"
+    namespace: "{{ netcheck_namespace }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0] and not item is skipped
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index f58dda1bb06b1ab61980a7496c4c9e33c1a7204f..6754174925d33f51cc9c857b0b1032d5f07c1223 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -41,10 +41,10 @@
 
 - name: Kubernetes Apps | Add policies, roles, bindings for PodSecurityPolicy
   kube:
-    name: "{{item.item.name}}"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    name: "{{ item.item.name }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   register: result
   until: result is succeeded
@@ -69,7 +69,7 @@
 - name: Apply workaround to allow all nodes with cert O=system:nodes to register
   kube:
     name: "kubespray:system:node"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "clusterrolebinding"
     filename: "{{ kube_config_dir }}/node-crb.yml"
     state: latest
@@ -96,7 +96,7 @@
 - name: Apply webhook ClusterRole
   kube:
     name: "system:node-webhook"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "clusterrole"
     filename: "{{ kube_config_dir }}/node-webhook-cr.yml"
     state: latest
@@ -121,7 +121,7 @@
 - name: Grant system:nodes the webhook ClusterRole
   kube:
     name: "system:node-webhook"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "clusterrolebinding"
     filename: "{{ kube_config_dir }}/node-webhook-crb.yml"
     state: latest
@@ -164,7 +164,7 @@
 - name: Apply vsphere-cloud-provider ClusterRole
   kube:
     name: "system:vsphere-cloud-provider"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "clusterrolebinding"
     filename: "{{ kube_config_dir }}/vsphere-rbac.yml"
     state: latest
@@ -194,7 +194,7 @@
 - name: PriorityClass | Create k8s-cluster-critical
   kube:
     name: k8s-cluster-critical
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "PriorityClass"
     filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
     state: latest
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/oci.yml b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
index 54ee49d78ea851e2ca4a3827590eb52525e8659d..22b39b3d407b238c7f449add021aa38b030ab7dc 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
@@ -10,7 +10,7 @@
 
 - name: Apply OCI RBAC
   kube:
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/oci-rbac.yml"
   when:
   - cloud_provider is defined
diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml
index 50822be7d4678b1e655fb4a01927e984a795c241..fd3ea42fa67b04eb4b2f322035befaffc9ffc67f 100644
--- a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml
+++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml
@@ -13,12 +13,12 @@
 
 - name: Container Engine Acceleration Nvidia GPU | Set fact of download url Tesla
   set_fact:
-    nvidia_driver_download_url_default: "{{nvidia_gpu_tesla_base_url}}{{nvidia_url_end}}"
+    nvidia_driver_download_url_default: "{{ nvidia_gpu_tesla_base_url }}{{ nvidia_url_end }}"
   when: nvidia_gpu_flavor|lower == "tesla"
 
 - name: Container Engine Acceleration Nvidia GPU | Set fact of download url GTX
   set_fact:
-    nvidia_driver_download_url_default: "{{nvidia_gpu_gtx_base_url}}{{nvidia_url_end}}"
+    nvidia_driver_download_url_default: "{{ nvidia_gpu_gtx_base_url }}{{ nvidia_url_end }}"
   when: nvidia_gpu_flavor|lower == "gtx"
 
 - name: Container Engine Acceleration Nvidia GPU | Create addon dir
@@ -49,6 +49,6 @@
     filename: "{{ kube_config_dir }}/addons/container_engine_accelerator/{{ item.item.file }}"
     state: "latest"
   with_items:
-    - "{{container_engine_accelerator_manifests.results}}"
+    - "{{ container_engine_accelerator_manifests.results }}"
   when:
     - inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container and nvidia_driver_install_supported
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
index 6b970317eba49808913d134c0296059d6f6cc004..2359588b593e5e350e5b1885f827b315a3f7766a 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
@@ -9,7 +9,7 @@
   delegate_to: "{{ item[0] }}"
   with_nested:
     - "{{ groups['k8s-cluster'] }}"
-    - "{{ local_volume_provisioner_storage_classes.keys() | list}}"
+    - "{{ local_volume_provisioner_storage_classes.keys() | list }}"
 
 - name: Local Volume Provisioner | Create addon dir
   file:
diff --git a/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
index 69d0cd2f93ef3b205cadb53c9e96b7227c1ac6da..053fbc0db44ac4b99a27da672e2dbb5b55dbceaf 100644
--- a/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
+++ b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
@@ -1,15 +1,15 @@
 ---
-- name: "Gen_helm_tiller_certs | Create helm config directory (on {{groups['kube-master'][0]}})"
+- name: "Gen_helm_tiller_certs | Create helm config directory (on {{ groups['kube-master'][0] }})"
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   file:
     path: "{{ helm_config_dir }}"
     state: directory
     owner: kube
 
-- name: "Gen_helm_tiller_certs | Create helm script directory (on {{groups['kube-master'][0]}})"
+- name: "Gen_helm_tiller_certs | Create helm script directory (on {{ groups['kube-master'][0] }})"
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   file:
     path: "{{ helm_script_dir }}"
     state: directory
@@ -17,24 +17,24 @@
 
 - name: Gen_helm_tiller_certs | Copy certs generation script
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   template:
     src: "helm-make-ssl.sh.j2"
     dest: "{{ helm_script_dir }}/helm-make-ssl.sh"
     mode: 0700
 
-- name: "Check_helm_certs | check if helm client certs have already been generated on first master (on {{groups['kube-master'][0]}})"
+- name: "Check_helm_certs | check if helm client certs have already been generated on first master (on {{ groups['kube-master'][0] }})"
   find:
     paths: "{{ helm_home_dir }}"
     patterns: "*.pem"
     get_checksum: true
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   register: helmcert_master
   run_once: true
 
 - name: Gen_helm_tiller_certs | run cert generation script
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   command: "{{ helm_script_dir }}/helm-make-ssl.sh -e {{ helm_home_dir }} -d {{ helm_tiller_cert_dir }}"
 
 - set_fact:
@@ -64,7 +64,7 @@
   no_log: true
   register: helm_client_cert_data
   check_mode: no
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
 
 - name: Gen_helm_tiller_certs | Use tempfile for unpacking certs on masters
@@ -78,8 +78,8 @@
 
 - name: Gen_helm_tiller_certs | Write helm client certs to tempfile
   copy:
-    content: "{{helm_client_cert_data.stdout}}"
-    dest: "{{helm_cert_tempfile.path}}"
+    content: "{{ helm_client_cert_data.stdout }}"
+    dest: "{{ helm_cert_tempfile.path }}"
     owner: root
     mode: "0600"
   when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
@@ -93,7 +93,7 @@
 
 - name: Gen_helm_tiller_certs | Cleanup tempfile on masters
   file:
-    path: "{{helm_cert_tempfile.path}}"
+    path: "{{ helm_cert_tempfile.path }}"
     state: absent
   when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
 
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index 2a9843de7ca05a2fc4f48e470209874ebdd76869..900261fd2958d8602de82076a579c95a477b3458 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -7,8 +7,8 @@
 
 - name: Helm | Lay Down Helm Manifests (RBAC)
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: tiller, file: tiller-namespace.yml, type: namespace}
     - {name: tiller, file: tiller-sa.yml, type: sa}
@@ -20,11 +20,11 @@
 
 - name: Helm | Apply Helm Manifests (RBAC)
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "{{ tiller_namespace }}"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ manifests.results }}"
   when:
@@ -56,7 +56,7 @@
     {% endif %}
   register: install_helm
   changed_when: false
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 # FIXME: https://github.com/helm/helm/issues/4063
 - name: Helm | Force apply tiller overrides if necessary
@@ -73,12 +73,12 @@
     {% if tiller_secure_release_info %} --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}' {% endif %}
     {% if tiller_wait %} --wait{% endif %}
     --output yaml
-    | {{bin_dir}}/kubectl apply -f -
+    | {{ bin_dir }}/kubectl apply -f -
   changed_when: false
   when:
     - (tiller_override is defined and tiller_override) or (kube_version is version('v1.11.1', '>='))
     - inventory_hostname == groups['kube-master'][0]
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - name: Make sure bash_completion.d folder exists
   file:
diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
index d82da98ae72a424ed48fcefd0e14916fa1fa983e..65fb9d51523359fdbca32474bc6be71eb71b1481 100644
--- a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
@@ -1,11 +1,11 @@
 ---
 - name: Start Calico resources
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items:
     - "{{ calico_node_manifests.results }}"
diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
index d5776def15172389aef7cb92400027311483b932..b495106b199b807769ff66a3f77cba5c4e688d8d 100644
--- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
@@ -1,11 +1,11 @@
 ---
 - name: Canal | Start Resources
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ canal_manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0] and not item is skipped
diff --git a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
index 363f795a4e9b3cb9c3fe7327080b74c2f612b18a..1baaa1ce634ea37dc91a28a7b1193c42cdb21730 100755
--- a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
@@ -1,17 +1,17 @@
 ---
 - name: Cilium | Start Resources
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ cilium_node_manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0] and not item is skipped
 
 - name: Cilium | Wait for pods to run
-  command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601
+  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601
   register: pods_not_ready
   until: pods_not_ready.stdout.find("cilium")==-1
   retries: 30
diff --git a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
index 2be0739f8f4e0a170dbd558173ce1742f889eff1..3ed49db810d6f904d42595585a4a5ec872c62806 100644
--- a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
@@ -1,11 +1,11 @@
 ---
 - name: Flannel | Start Resources
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ flannel_node_manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0] and not item is skipped
diff --git a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
index f5ff163086520acb8e8e6f948910d79906abf9d8..3b76c4336e0b50982135ab4c41d1400ecd0fbb61 100644
--- a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
@@ -12,7 +12,7 @@
     - inventory_hostname == groups['kube-master'][0]
 
 - name: kube-router | Wait for kube-router pods to be ready
-  command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"   # noqa 601
+  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"   # noqa 601
   register: pods_not_ready
   until: pods_not_ready.stdout.find("kube-router")==-1
   retries: 30
diff --git a/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml b/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml
index 9d7669cc7edaf722a05a738b7dbaf72b0a306862..48d00538ca37517df2197b09f720f18ec9eb5a54 100644
--- a/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml
@@ -1,11 +1,11 @@
 ---
 - name: Multus | Start resources
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
-  with_items: "{{ multus_manifest_1.results }} + {{multus_manifest_2.results }}"
+  with_items: "{{ multus_manifest_1.results }} + {{ multus_manifest_2.results }}"
   when: inventory_hostname == groups['kube-master'][0] and not item|skipped
diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
index 80d5fdd29f07a384a7ffe206244610081274a0e5..629c6add7700bed80c42b7c2488d26bea61114fd 100644
--- a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
+++ b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Kubernetes Persistent Volumes | Lay down OpenStack Cinder Storage Class template
   template:
     src: "openstack-storage-class.yml.j2"
-    dest: "{{kube_config_dir}}/openstack-storage-class.yml"
+    dest: "{{ kube_config_dir }}/openstack-storage-class.yml"
   register: manifests
   when:
     - inventory_hostname == groups['kube-master'][0]
@@ -10,9 +10,9 @@
 - name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class
   kube:
     name: storage-class
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: StorageClass
-    filename: "{{kube_config_dir}}/openstack-storage-class.yml"
+    filename: "{{ kube_config_dir }}/openstack-storage-class.yml"
     state: "latest"
   when:
     - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
index 1f262affa8fc4601c8abc7c603b33e346a6a4ebe..bbd39d63f59621d420dbea15999f3c0a2adf33ab 100644
--- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
+++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
@@ -10,8 +10,8 @@
 
 - name: Create calico-kube-controllers manifests
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: calico-kube-controllers, file: calico-kube-controllers.yml, type: deployment}
     - {name: calico-kube-controllers, file: calico-kube-sa.yml, type: sa}
@@ -24,11 +24,11 @@
 
 - name: Start of Calico kube controllers
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items:
     - "{{ calico_kube_manifests.results }}"
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index a79fdee12a947353fca8f58d41d1133934a7eea3..373362427c3f3121af2e79d1ccaa114d97b13ca4 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -77,7 +77,7 @@
     - name: Join to cluster
       command: >-
         {{ bin_dir }}/kubeadm join
-        --config {{ kube_config_dir}}/kubeadm-client.conf
+        --config {{ kube_config_dir }}/kubeadm-client.conf
         --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests
       register: kubeadm_join
       async: 120
@@ -88,7 +88,7 @@
     - name: Join to cluster with ignores
       command: >-
         {{ bin_dir }}/kubeadm join
-        --config {{ kube_config_dir}}/kubeadm-client.conf
+        --config {{ kube_config_dir }}/kubeadm-client.conf
         --ignore-preflight-errors=all
       register: kubeadm_join
       async: 60
diff --git a/roles/kubernetes/master/tasks/encrypt-at-rest.yml b/roles/kubernetes/master/tasks/encrypt-at-rest.yml
index 1927900392d0ce15d99722b16c26e32758398db1..09584dce8fcfd48f539587f4dc963a375242b748 100644
--- a/roles/kubernetes/master/tasks/encrypt-at-rest.yml
+++ b/roles/kubernetes/master/tasks/encrypt-at-rest.yml
@@ -12,12 +12,12 @@
 
 - name: Base 64 Decode slurped secrets_encryption.yaml file
   set_fact:
-    secret_file_decoded: "{{secret_file_encoded['content'] | b64decode | from_yaml}}"
+    secret_file_decoded: "{{ secret_file_encoded['content'] | b64decode | from_yaml }}"
   when: secrets_encryption_file.stat.exists
 
 - name: Extract secret value from secrets_encryption.yaml
   set_fact:
-    kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode}}"
+    kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode }}"
   when: secrets_encryption_file.stat.exists
 
 - name: Set kube_encrypt_token across master nodes
diff --git a/roles/kubernetes/master/tasks/kubeadm-secondary-experimental.yml b/roles/kubernetes/master/tasks/kubeadm-secondary-experimental.yml
index c3afb5c0cdf3fd548b76d259cdad87b05114d9d2..e1dfef01cca065e81b5cebd5ebaf740aefcc88d5 100644
--- a/roles/kubernetes/master/tasks/kubeadm-secondary-experimental.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-secondary-experimental.yml
@@ -5,7 +5,7 @@
       {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
       {{ first_kube_master }}:{{ kube_apiserver_port }}
       {%- else -%}
-      {{ kube_apiserver_endpoint | regex_replace('https://', '')}}
+      {{ kube_apiserver_endpoint | regex_replace('https://', '') }}
       {%- endif %}
   tags:
     - facts
@@ -21,15 +21,15 @@
 
 - name: Wait for k8s apiserver
   wait_for:
-    host: "{{kubeadm_discovery_address.split(':')[0]}}"
-    port: "{{kubeadm_discovery_address.split(':')[1]}}"
+    host: "{{ kubeadm_discovery_address.split(':')[0] }}"
+    port: "{{ kubeadm_discovery_address.split(':')[1] }}"
     timeout: 180
 
 
 - name: Upload certificates so they are fresh and not expired
   command: >-
     {{ bin_dir }}/kubeadm init phase
-    --config {{ kube_config_dir}}/kubeadm-config.yaml
+    --config {{ kube_config_dir }}/kubeadm-config.yaml
     upload-certs --experimental-upload-certs
     {% if kubeadm_certificate_key is defined %}
     --certificate-key={{ kubeadm_certificate_key }}
@@ -46,7 +46,7 @@
 - name: Joining control plane node to the cluster.
   command: >-
     {{ bin_dir }}/kubeadm join
-    --config {{ kube_config_dir}}/kubeadm-controlplane.yaml
+    --config {{ kube_config_dir }}/kubeadm-controlplane.yaml
     --ignore-preflight-errors=all
     {% if kubeadm_certificate_key is defined %}
     --certificate-key={{ kubeadm_certificate_key }}
diff --git a/roles/kubernetes/master/tasks/kubeadm-setup.yml b/roles/kubernetes/master/tasks/kubeadm-setup.yml
index 24c91d1bed81921ec8662618e9d662492aafe6df..a00702c9546ebabf705a8cca87b01625680d64f6 100644
--- a/roles/kubernetes/master/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-setup.yml
@@ -3,7 +3,7 @@
   stat:
     path: "{{ kube_cert_dir }}/apiserver.pem"
   register: old_apiserver_cert
-  delegate_to: "{{groups['kube-master']|first}}"
+  delegate_to: "{{ groups['kube-master'] | first }}"
   run_once: true
 
 - name: kubeadm | Migrate old certs if necessary
@@ -41,14 +41,14 @@
 
 - name: kubeadm | Delete old static pods
   file:
-    path: "{{ kube_config_dir }}/manifests/{{item}}.manifest"
+    path: "{{ kube_config_dir }}/manifests/{{ item }}.manifest"
     state: absent
   with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler", "kube-proxy"]
   when:
     - old_apiserver_cert.stat.exists
 
 - name: kubeadm | Forcefully delete old static pods
-  shell: "docker ps -f name=k8s_{{item}} -q | xargs --no-run-if-empty docker rm -f"
+  shell: "docker ps -f name=k8s_{{ item }} -q | xargs --no-run-if-empty docker rm -f"
   with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   when:
     - old_apiserver_cert.stat.exists
@@ -147,7 +147,7 @@
   retries: 5
   delay: 5
   until: temp_token is succeeded
-  delegate_to: "{{groups['kube-master']|first}}"
+  delegate_to: "{{ groups['kube-master'] | first }}"
   when: kubeadm_token is not defined
   tags:
     - kubeadm_token
@@ -190,6 +190,6 @@
 # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
 - name: kubeadm | Remove taint for master with node role
   command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} node-role.kubernetes.io/master:NoSchedule-"
-  delegate_to: "{{groups['kube-master']|first}}"
+  delegate_to: "{{ groups['kube-master'] | first }}"
   when: inventory_hostname in groups['kube-node']
   failed_when: false
diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml
index 3fd9855ea28489ce999972d75ee53d7c162824e9..d6ce320ba033ce75d3457dd5e3151a8eb05c8948 100644
--- a/roles/kubernetes/master/tasks/pre-upgrade.yml
+++ b/roles/kubernetes/master/tasks/pre-upgrade.yml
@@ -1,7 +1,7 @@
 ---
 - name: "Pre-upgrade | Delete master manifests if etcd secrets changed"
   file:
-    path: "/etc/kubernetes/manifests/{{item}}.manifest"
+    path: "/etc/kubernetes/manifests/{{ item }}.manifest"
     state: absent
   with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
@@ -9,7 +9,7 @@
   when: etcd_secret_changed|default(false)
 
 - name: "Pre-upgrade | Delete master containers forcefully"
-  shell: "docker ps -af name=k8s_{{item}}* -q | xargs --no-run-if-empty docker rm -f"
+  shell: "docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
   with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   when: kube_apiserver_manifest_replaced.changed
diff --git a/roles/kubernetes/node/tasks/azure-credential-check.yml b/roles/kubernetes/node/tasks/azure-credential-check.yml
index 840b5bbfcd7169af3c2ebbab03aab929ab0f9467..529bc3f8fbe20881ed5df6fd0a61012763f910e2 100644
--- a/roles/kubernetes/node/tasks/azure-credential-check.yml
+++ b/roles/kubernetes/node/tasks/azure-credential-check.yml
@@ -56,7 +56,7 @@
 
 - name: check azure_loadbalancer_sku value
   fail:
-    msg: "azure_loadbalancer_sku has an invalid value '{{azure_loadbalancer_sku}}'. Supported values are 'basic', 'standard'"
+    msg: "azure_loadbalancer_sku has an invalid value '{{ azure_loadbalancer_sku }}'. Supported values are 'basic', 'standard'"
   when: azure_loadbalancer_sku not in ["basic", "standard"]
 
 - name: "check azure_exclude_master_from_standard_lb is a bool"
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 9c249fc776834db6da77871e6cc9e3a7223950cd..0a593b3a0f33df5f159f81ac80005e41047350f8 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -65,7 +65,7 @@
 - name: Verify if br_netfilter module exists
   shell: "modinfo br_netfilter"
   environment:
-    PATH: "{{ ansible_env.PATH}}:/sbin"  # Make sure we can workaround RH's conservative path management
+    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH's conservative path management
   register: modinfo_br_netfilter
   failed_when: modinfo_br_netfilter.rc not in [0, 1]
   changed_when: false
diff --git a/roles/kubernetes/node/templates/kubelet.host.service.j2 b/roles/kubernetes/node/templates/kubelet.host.service.j2
index 3584cfcf51e470b21b86ea11323c84ec759ce1f4..96de60d6e29c6d41c9f3998906e6d94ce467bbb3 100644
--- a/roles/kubernetes/node/templates/kubelet.host.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.host.service.j2
@@ -6,7 +6,7 @@ Wants=docker.socket
 
 [Service]
 User=root
-EnvironmentFile=-{{kube_config_dir}}/kubelet.env
+EnvironmentFile=-{{ kube_config_dir }}/kubelet.env
 ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
 ExecStart={{ bin_dir }}/kubelet \
 		$KUBE_LOGTOSTDERR \
diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
index 79df9b396a985acf1affc17b5a0ecbc0f2718606..e7184d4a55f37c6eb6f3218616b9ba437522cf2b 100644
--- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
@@ -35,7 +35,7 @@
 - name: "Stop if known booleans are set as strings (Use JSON format on CLI: -e \"{'key': true }\")"
   assert:
     that: item.value|type_debug == 'bool'
-    msg: "{{item.value}} isn't a bool"
+    msg: "{{ item.value }} isn't a bool"
   run_once: yes
   with_items:
     - { name: download_run_once, value: "{{ download_run_once }}" }
diff --git a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
index 607197475a6931484405e683d6640d2733208dfd..c06207fd0692342b8ff1cfd5b691adf242c4c3fc 100644
--- a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
@@ -8,9 +8,9 @@
   set_fact:
     host_architecture: >-
       {%- if ansible_architecture in architecture_groups -%}
-      {{architecture_groups[ansible_architecture]}}
+      {{ architecture_groups[ansible_architecture] }}
       {%- else -%}
-       {{ansible_architecture}}
+      {{ ansible_architecture }}
       {% endif %}
 
 - name: Force binaries directory for Container Linux by CoreOS
@@ -46,7 +46,7 @@
 - set_fact:
     bogus_domains: |-
       {% for d in [ 'default.svc.' + dns_domain, 'svc.' + dns_domain ] + searchdomains|default([]) -%}
-      {{dns_domain}}.{{d}}./{{d}}.{{d}}./com.{{d}}./
+      {{ dns_domain }}.{{ d }}./{{ d }}.{{ d }}./com.{{ d }}./
       {%- endfor %}
     cloud_resolver: >-
       {%- if cloud_provider is defined and cloud_provider == 'gce' -%}
@@ -139,9 +139,9 @@
 - name: generate nameservers to resolvconf
   set_fact:
     nameserverentries:
-      nameserver {{( coredns_server + nameservers|d([]) + cloud_resolver|d([])) | join(',nameserver ')}}
+      nameserver {{ (coredns_server + nameservers|d([]) + cloud_resolver|d([])) | join(',nameserver ') }}
     supersede_nameserver:
-      supersede domain-name-servers {{( coredns_server + nameservers|d([]) + cloud_resolver|d([])) | join(', ') }};
+      supersede domain-name-servers {{ (coredns_server + nameservers|d([]) + cloud_resolver|d([])) | join(', ') }};
 
 - name: gather os specific variables
   include_vars: "{{ item }}"
diff --git a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
index 93b95a32b180a4879ebea7208c2fdb700b7ff091..1e28d178553fc2fb86b559651e7af2406b70abf8 100644
--- a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
+++ b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
@@ -17,7 +17,7 @@
     - master
     - node
   with_items:
-    - "{{bin_dir}}"
+    - "{{ bin_dir }}"
     - "{{ kube_config_dir }}"
     - "{{ kube_cert_dir }}"
     - "{{ kube_manifest_dir }}"
diff --git a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
index 099077753eda96435c3b94d5c91f30b7bf3a2e88..a57e567fe1527d306e4b31881d6f587ba7dade5c 100644
--- a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
+++ b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
@@ -5,7 +5,7 @@
 
 - name: Add domain/search/nameservers/options to resolv.conf
   blockinfile:
-    path: "{{resolvconffile}}"
+    path: "{{ resolvconffile }}"
     block: |-
       {% for item in [domainentry] + [searchentries] + nameserverentries.split(',') -%}
       {{ item }}
@@ -22,7 +22,7 @@
 
 - name: Remove search/domain/nameserver options before block
   replace:
-    dest: "{{item[0]}}"
+    dest: "{{ item[0] }}"
     regexp: '^{{ item[1] }}[^#]*(?=# Ansible entries BEGIN)'
     backup: yes
     follow: yes
@@ -33,7 +33,7 @@
 
 - name: Remove search/domain/nameserver options after block
   replace:
-    dest: "{{item[0]}}"
+    dest: "{{ item[0] }}"
     regexp: '(# Ansible entries END\n(?:(?!^{{ item[1] }}).*\n)*)(?:^{{ item[1] }}.*\n?)+'
     replace: '\1'
     backup: yes
@@ -51,7 +51,7 @@
 
 - name: persist resolvconf cloud init file
   template:
-    dest: "{{resolveconf_cloud_init_conf}}"
+    dest: "{{ resolveconf_cloud_init_conf }}"
     src: resolvconf.j2
     owner: root
     mode: 0644
diff --git a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
index eb87b14f4b488f720a0a0f5e9ca8fcef80a96880..5e2c87b55b9cbb0a9ab86b0f83ee707ad31d6b07 100644
--- a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
+++ b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
@@ -31,14 +31,14 @@
 
 - name: Stat sysctl file configuration
   stat:
-    path: "{{sysctl_file_path}}"
+    path: "{{ sysctl_file_path }}"
   register: sysctl_file_stat
   tags:
     - bootstrap-os
 
 - name: Change sysctl file path to link source if linked
   set_fact:
-    sysctl_file_path: "{{sysctl_file_stat.stat.lnk_source}}"
+    sysctl_file_path: "{{ sysctl_file_stat.stat.lnk_source }}"
   when:
     - sysctl_file_stat.stat.islnk is defined
     - sysctl_file_stat.stat.islnk
@@ -52,7 +52,7 @@
 
 - name: Enable ip forwarding
   sysctl:
-    sysctl_file: "{{sysctl_file_path}}"
+    sysctl_file: "{{ sysctl_file_path }}"
     name: net.ipv4.ip_forward
     value: 1
     state: present
diff --git a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
index 02fc3c420aeb70b8da8ffe9f64f5169210b5cfb5..1298b78525721c91f72feb082b9c924525a5abba 100644
--- a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
+++ b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
@@ -5,7 +5,7 @@
     block: |-
       {% for item in (groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]))|unique -%}
       {% if 'access_ip' in hostvars[item] or 'ip' in hostvars[item] or fallback_ips[item] != "skip" -%}
-      {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item]))}}
+      {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}
       {%- if ('ansible_hostname' in hostvars[item] and item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] }}{% endif %} {{ item }} {{ item }}.{{ dns_domain }}
       {% endif %}
       {% endfor %}
diff --git a/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
index 9165c09f811008a2f2e90684b8594e29d99fe240..52ffb8b86a879c17d331717845ad3da8a4a2f1c6 100644
--- a/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
+++ b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
@@ -5,7 +5,7 @@
       {% for item in [ supersede_domain, supersede_search, supersede_nameserver ] -%}
       {{ item }}
       {% endfor %}
-    path: "{{dhclientconffile}}"
+    path: "{{ dhclientconffile }}"
     create: yes
     state: present
     insertbefore: BOF
diff --git a/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml b/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml
index a184ddabc1133c0d4e4335d09c56b4e21df9a689..cf935a363a122724e755ba1ee07729288a039438 100644
--- a/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml
+++ b/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml
@@ -5,7 +5,7 @@
 
 - name: Remove kubespray specific config from dhclient config
   blockinfile:
-    path: "{{dhclientconffile}}"
+    path: "{{ dhclientconffile }}"
     state: absent
     backup: yes
     marker: "# Ansible entries {mark}"
diff --git a/roles/kubernetes/tokens/tasks/check-tokens.yml b/roles/kubernetes/tokens/tasks/check-tokens.yml
index 0f0c95b48979e3cf78fc611fa74a81fac6ee6d35..5d27928737d7333293e86afc4bc189e29e6d4080 100644
--- a/roles/kubernetes/tokens/tasks/check-tokens.yml
+++ b/roles/kubernetes/tokens/tasks/check-tokens.yml
@@ -2,7 +2,7 @@
 - name: "Check_tokens | check if the tokens have already been generated on first master"
   stat:
     path: "{{ kube_token_dir }}/known_tokens.csv"
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   register: known_tokens_master
   run_once: true
 
diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml
index 660b7367aea9d756e561d8c7e9d79efa5be3119e..9507a9323bde6b94ea466740ce18a2646f004b83 100644
--- a/roles/kubernetes/tokens/tasks/gen_tokens.yml
+++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml
@@ -5,7 +5,7 @@
     dest: "{{ kube_script_dir }}/kube-gen-token.sh"
     mode: 0700
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   when: gen_tokens|default(false)
 
 - name: Gen_tokens | generate tokens for master components
@@ -18,7 +18,7 @@
   register: gentoken_master
   changed_when: "'Added' in gentoken_master.stdout"
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   when: gen_tokens|default(false)
 
 - name: Gen_tokens | generate tokens for node components
@@ -31,14 +31,14 @@
   register: gentoken_node
   changed_when: "'Added' in gentoken_node.stdout"
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   when: gen_tokens|default(false)
 
 - name: Gen_tokens | Get list of tokens from first master
   shell: "(find {{ kube_token_dir }} -maxdepth 1 -type f)"
   register: tokens_list
   check_mode: no
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true
   when: sync_tokens|default(false)
 
@@ -48,7 +48,7 @@
     warn: false
   register: tokens_data
   check_mode: no
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true
   when: sync_tokens|default(false)
 
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 5cdd143e352f4f3468dcb4b4291fe11480b5ec19..c0401cd2419498bb1f294e6d5e1593ea8ec71d68 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -376,7 +376,7 @@ contiv_global_neighbor_as: "500"
 fallback_ips_base: |
   ---
   {% for item in groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([])|unique %}
-  {{item}}: "{{ hostvars[item].get('ansible_default_ipv4', {'address': '127.0.0.1'})['address'] }}"
+  {{ item }}: "{{ hostvars[item].get('ansible_default_ipv4', {'address': '127.0.0.1'})['address'] }}"
   {% endfor %}
 fallback_ips: "{{ fallback_ips_base | from_yaml }}"
 
diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml
index d7f02588cbd6cc50db40781328feab9a9442a249..41e8c85da4251c8092a954afbdcb0bae3c9f74de 100644
--- a/roles/network_plugin/calico/rr/tasks/main.yml
+++ b/roles/network_plugin/calico/rr/tasks/main.yml
@@ -61,7 +61,7 @@
     ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}-key.pem"
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - calico_version is version("v3.0.0", ">=")
 
@@ -79,7 +79,7 @@
     ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}-key.pem"
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - calico_version is version("v3.0.0", "<")
 
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml
index 321669add149dcdcc28384fdeba86e2f8c7481fb..b4923cec882d1ff20986fcb62993352d0db6cc1c 100644
--- a/roles/network_plugin/calico/tasks/install.yml
+++ b/roles/network_plugin/calico/tasks/install.yml
@@ -155,7 +155,7 @@
     - calico_version is version('v3.0.0', '>=')
 
 - name: Calico | Set global as_num (legacy)
-  command: "{{ bin_dir}}/calicoctl.sh config set asNumber {{ global_as_num }}"
+  command: "{{ bin_dir }}/calicoctl.sh config set asNumber {{ global_as_num }}"
   run_once: true
   when:
     - calico_version is version('v3.0.0', '<')
@@ -301,7 +301,7 @@
       "name": "{{ inventory_hostname }}-{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(fallback_ips[item]) }}"
    },
    "spec": {
-      "asNumber": "{{ local_as | default(global_as_num)}}",
+      "asNumber": "{{ local_as | default(global_as_num) }}",
       "node": "{{ inventory_hostname }}",
       "peerIP": "{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(fallback_ips[item]) }}"
    }}' | {{ bin_dir }}/calicoctl.sh create --skip-exists -f -
@@ -319,7 +319,7 @@
   shell: >
    echo '{
    "kind": "bgpPeer",
-   "spec": {"asNumber": "{{ local_as | default(global_as_num)}}"},
+   "spec": {"asNumber": "{{ local_as | default(global_as_num) }}"},
    "apiVersion": "v1",
    "metadata": {"node": "{{ inventory_hostname }}",
      "scope": "node",
@@ -338,8 +338,8 @@
 
 - name: Calico | Create calico manifests
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: calico-config, file: calico-config.yml, type: cm}
     - {name: calico-node, file: calico-node.yml, type: ds}
@@ -353,8 +353,8 @@
 
 - name: Calico | Create calico manifests for kdd
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: calico, file: kdd-crds.yml, type: kdd}
   register: calico_node_kdd_manifest
@@ -364,8 +364,8 @@
 
 - name: Calico | Create calico manifests for typha
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: calico, file: calico-typha.yml, type: typha}
   register: calico_node_typha_manifest
diff --git a/roles/network_plugin/calico/tasks/upgrade.yml b/roles/network_plugin/calico/tasks/upgrade.yml
index 9754d058e4ba3290258e8233e1d6138246d707ee..a4b7cffd65ce37c653d40a9818ab0fe600aa2946 100644
--- a/roles/network_plugin/calico/tasks/upgrade.yml
+++ b/roles/network_plugin/calico/tasks/upgrade.yml
@@ -7,7 +7,7 @@
     owner: root
     group: root
     force: yes
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 - name: "Create etcdv2 and etcdv3 calicoApiConfig"
   template:
     src: "{{ item }}-store.yml.j2"
diff --git a/roles/network_plugin/canal/tasks/main.yml b/roles/network_plugin/canal/tasks/main.yml
index acf0d3567753d1b46df6404a5f4ab43bc202befe..3de079b5f55367f34a0ea3d55e176bec79584fc7 100644
--- a/roles/network_plugin/canal/tasks/main.yml
+++ b/roles/network_plugin/canal/tasks/main.yml
@@ -31,7 +31,7 @@
     '{ "Network": "{{ kube_pods_subnet }}", "SubnetLen": {{ kube_network_node_prefix }}, "Backend": { "Type": "{{ flannel_backend_type }}" } }'
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   changed_when: false
   run_once: true
   environment:
@@ -40,8 +40,8 @@
 
 - name: Canal | Create canal node manifests
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: canal-config, file: canal-config.yaml, type: cm}
     - {name: canal-node, file: canal-node.yaml, type: ds}
diff --git a/roles/network_plugin/cilium/tasks/main.yml b/roles/network_plugin/cilium/tasks/main.yml
index 44ab4ae573c82d8895b1fa398b7360dd57e05dbd..e830818e9ef13d7f7fe855924fa3c956ef214e13 100755
--- a/roles/network_plugin/cilium/tasks/main.yml
+++ b/roles/network_plugin/cilium/tasks/main.yml
@@ -27,8 +27,8 @@
 
 - name: Cilium | Create Cilium node manifests
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: cilium, file: cilium-config.yml, type: cm}
     - {name: cilium, file: cilium-crb.yml, type: clusterrolebinding}
diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml
index 0124fe2375ba470d3ada7c0b00f39153484ac093..d5a725baff05172ec9496531455f8d1bb66e5694 100644
--- a/roles/network_plugin/flannel/tasks/main.yml
+++ b/roles/network_plugin/flannel/tasks/main.yml
@@ -1,8 +1,8 @@
 ---
 - name: Flannel | Create Flannel manifests
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items:
     - {name: flannel, file: cni-flannel-rbac.yml, type: sa}
     - {name: kube-flannel, file: cni-flannel.yml, type: ds}
diff --git a/roles/network_plugin/kube-router/tasks/annotate.yml b/roles/network_plugin/kube-router/tasks/annotate.yml
index a6a481e4c2a445c7aab8efedd6d43f073c7f9752..eb70b0fbe7d78a45ea84c05cbe52ab1b4d5a2f27 100644
--- a/roles/network_plugin/kube-router/tasks/annotate.yml
+++ b/roles/network_plugin/kube-router/tasks/annotate.yml
@@ -1,21 +1,21 @@
 ---
 - name: kube-router | Add annotations on kube-master
-  command: "{{bin_dir}}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+  command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_master }}"
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   when: kube_router_annotations_master is defined and inventory_hostname in groups['kube-master']
 
 - name: kube-router | Add annotations on kube-node
-  command: "{{bin_dir}}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+  command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_node }}"
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   when: kube_router_annotations_node is defined and inventory_hostname in groups['kube-node']
 
 - name: kube-router | Add common annotations on all servers
-  command: "{{bin_dir}}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+  command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_all }}"
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   when: kube_router_annotations_all is defined and inventory_hostname in groups['all']
\ No newline at end of file
diff --git a/roles/recover_control_plane/etcd/tasks/prepare.yml b/roles/recover_control_plane/etcd/tasks/prepare.yml
index 0f00f0338bcc3bec0f7a60e0eeed3bf121b7bbc4..d3cacb9345b8657fc2187d7b2986b4fa4a368a66 100644
--- a/roles/recover_control_plane/etcd/tasks/prepare.yml
+++ b/roles/recover_control_plane/etcd/tasks/prepare.yml
@@ -32,7 +32,7 @@
     - old_etcd_members is defined
 
 - name: Remove old cluster members
-  shell: "{{ bin_dir}}/etcdctl --endpoints={{ etcd_access_addresses }} member remove {{ item[1].replace(' ','').split(',')[0] }}"
+  shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} member remove {{ item[1].replace(' ','').split(',')[0] }}"
   environment:
     - ETCDCTL_API: 3
     - ETCDCTL_CA_FILE: /etc/ssl/etcd/ssl/ca.pem
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index b820bff099887d7339f96d287bc6f7252bd75817..530cb29dfd151c253322629905fc7ddf7c764c65 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: Delete node
-  command: "{{ bin_dir}}/kubectl delete node {{ item }}"
+  command: "{{ bin_dir }}/kubectl delete node {{ item }}"
   with_items:
     - "{{ node.split(',') | default(groups['kube-node']) }}"
   delegate_to: "{{ groups['kube-master']|first }}"
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 8cd3ef23a738db89ffbd76f2d6d6c81bc0cdc7e1..56039fb0d6c28e4bcfdee7fb9144c81ac06f49d5 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -118,7 +118,7 @@
     - mounts
 
 - name: reset | unmount kubelet dirs
-  command: umount -f {{item}}
+  command: umount -f {{ item }}
   with_items: '{{ mounted_dirs.stdout_lines }}'
   register: umount_dir
   retries: 4
@@ -170,7 +170,7 @@
     path: "{{ item }}"
     state: absent
   with_items:
-    - "{{kube_config_dir}}"
+    - "{{ kube_config_dir }}"
     - /var/lib/kubelet
     - /root/.kube
     - /root/.helm
diff --git a/roles/win_nodes/kubernetes_patch/tasks/main.yml b/roles/win_nodes/kubernetes_patch/tasks/main.yml
index b2a3ad897556408f8d741a1a9c2657ab363815d4..e81e5c79fea93963c8bb5c9e066ac5bc89a6cb14 100644
--- a/roles/win_nodes/kubernetes_patch/tasks/main.yml
+++ b/roles/win_nodes/kubernetes_patch/tasks/main.yml
@@ -16,11 +16,11 @@
 
     # Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch"
     - name: Check current nodeselector for kube-proxy daemonset
-      shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta.kubernetes.io/os}'"
+      shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta.kubernetes.io/os}'"
       register: current_kube_proxy_state
 
     - name: Apply nodeselector patch for kube-proxy daemonset
-      shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf patch ds kube-proxy --namespace=kube-system --type=strategic -p \"$(cat nodeselector-os-linux-patch.json)\""
+      shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf patch ds kube-proxy --namespace=kube-system --type=strategic -p \"$(cat nodeselector-os-linux-patch.json)\""
       args:
         chdir: "{{ kubernetes_user_manifests_path }}"
       register: patch_kube_proxy_state
diff --git a/scale.yml b/scale.yml
index 723debbb33f12ff7c651c1a57dbac5ea2aa6fe16..c7f3dfd31263fa1679f0b5102f580a3868a7be8c 100644
--- a/scale.yml
+++ b/scale.yml
@@ -53,4 +53,4 @@
     - { role: kubernetes/node, tags: node }
     - { role: kubernetes/kubeadm, tags: kubeadm }
     - { role: network_plugin, tags: network }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index db577161baccad39ad61ab90ce06fc4238bc5ff9..9ba68c0e15b242d40b12cedcaf5153d4db9eafd3 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -32,13 +32,13 @@
       - name: etcd_info
         cmd: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses | default('http://127.0.0.1:2379') }} cluster-health"
       - name: calico_info
-        cmd: "{{bin_dir}}/calicoctl node status"
+        cmd: "{{ bin_dir }}/calicoctl node status"
         when: '{{ kube_network_plugin == "calico" }}'
       - name: calico_workload_info
-        cmd: "{{bin_dir}}/calicoctl get workloadEndpoint -o wide"
+        cmd: "{{ bin_dir }}/calicoctl get workloadEndpoint -o wide"
         when: '{{ kube_network_plugin == "calico" }}'
       - name: calico_pool_info
-        cmd: "{{bin_dir}}/calicoctl get ippool -o wide"
+        cmd: "{{ bin_dir }}/calicoctl get ippool -o wide"
         when: '{{ kube_network_plugin == "calico" }}'
       - name: weave_info
         cmd: weave report
@@ -111,19 +111,19 @@
     - name: Storing commands output
       shell: "{{ item.cmd }} 2>&1 | tee {{ item.name }}"
       failed_when: false
-      with_items: "{{commands}}"
+      with_items: "{{ commands }}"
       when: item.when | default(True)
       no_log: True
 
     - name: Fetch results
       fetch: src={{ item.name }} dest=/tmp/{{ archive_dirname }}/commands
-      with_items: "{{commands}}"
+      with_items: "{{ commands }}"
       when: item.when | default(True)
       failed_when: false
 
     - name: Fetch logs
       fetch: src={{ item }} dest=/tmp/{{ archive_dirname }}/logs
-      with_items: "{{logs}}"
+      with_items: "{{ logs }}"
       failed_when: false
 
     - name: Pack results and logs
@@ -137,4 +137,4 @@
 
     - name: Clean up collected command outputs
       file: path={{ item.name }} state=absent
-      with_items: "{{commands}}"
+      with_items: "{{ commands }}"
diff --git a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
index a09960d9691d8be1a552cbd4ab5db1bf6bed594b..270a39f7b476b653018470f6d0d4327593295ad4 100644
--- a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
+++ b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
@@ -40,7 +40,7 @@
     dest: "{{ images_dir }}/Dockerfile"
 
 - name: Create docker images for each OS
-  command: docker build -t {{registry}}/vm-{{ item.key }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
+  command: docker build -t {{ registry }}/vm-{{ item.key }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
   with_dict:
     - "{{ images }}"
 
diff --git a/tests/cloud_playbooks/create-aws.yml b/tests/cloud_playbooks/create-aws.yml
index 7dbf3d6c35d40e757ab0132aef2808d5179709a1..dcc51bdf78d0f8dafee73de78dc8406184edb946 100644
--- a/tests/cloud_playbooks/create-aws.yml
+++ b/tests/cloud_playbooks/create-aws.yml
@@ -10,8 +10,8 @@
       aws_access_key: "{{ aws.access_key }}"
       aws_secret_key: "{{ aws.secret_key }}"
       region: "{{ aws.region }}"
-      group_id: "{{ aws.group}}"
-      instance_type: "{{ aws.instance_type}}"
+      group_id: "{{ aws.group }}"
+      instance_type: "{{ aws.instance_type }}"
       image: "{{ aws.ami_id }}"
       wait: true
       count: "{{ aws.count }}"
@@ -30,4 +30,4 @@
       timeout: 300
       state: started
     delegate_to: localhost
-    with_items: "{{ec2.instances}}"
+    with_items: "{{ ec2.instances }}"
diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml
index 86e97f1bb84a5df6aac6997d9ca0d18f9f3b9a2b..02fab16e4e9d2367b2021c07af8454c823cf8ba8 100644
--- a/tests/cloud_playbooks/create-do.yml
+++ b/tests/cloud_playbooks/create-do.yml
@@ -52,20 +52,20 @@
   tasks:
     - name: replace_test_id
       set_fact:
-        test_name: "{{test_id |regex_replace('\\.', '-')}}"
+        test_name: "{{ test_id |regex_replace('\\.', '-') }}"
 
     - name: show vars
-      debug: msg="{{cloud_region}}, {{cloud_image}}"
+      debug: msg="{{ cloud_region }}, {{ cloud_image }}"
 
     - set_fact:
         instance_names: >-
           {%- if mode in ['separate', 'ha'] -%}
-          ["k8s-{{test_name}}-1", "k8s-{{test_name}}-2", "k8s-{{test_name}}-3"]
+          ["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2", "k8s-{{ test_name }}-3"]
           {%- else -%}
-          ["k8s-{{test_name}}-1", "k8s-{{test_name}}-2"]
+          ["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2"]
           {%- endif -%}
 
-    - name: Manage DO instances | {{state}}
+    - name: Manage DO instances | {{ state }}
       digital_ocean:
         unique_name: yes
         api_token: "{{ lookup('env','DO_API_TOKEN') }}"
@@ -73,16 +73,16 @@
         image_id: "{{ cloud_image }}"
         name: "{{ item }}"
         private_networking: no
-        region_id: "{{cloud_region}}"
-        size_id: "{{cloud_machine_type}}"
-        ssh_key_ids: "{{ssh_key_id}}"
-        state: "{{state}}"
+        region_id: "{{ cloud_region }}"
+        size_id: "{{ cloud_machine_type }}"
+        ssh_key_ids: "{{ ssh_key_id }}"
+        state: "{{ state }}"
         wait: yes
       register: droplets
-      with_items: "{{instance_names}}"
+      with_items: "{{ instance_names }}"
 
     - debug:
-        msg: "{{droplets}}, {{inventory_path}}"
+        msg: "{{ droplets }}, {{ inventory_path }}"
       when: state == 'present'
 
     - name: Template the inventory
@@ -92,6 +92,6 @@
       when: state == 'present'
 
     - name: Wait for SSH to come up
-      wait_for: host={{item.droplet.ip_address}} port=22 delay=10 timeout=180 state=started
-      with_items: "{{droplets.results}}"
+      wait_for: host={{ item.droplet.ip_address }} port=22 delay=10 timeout=180 state=started
+      with_items: "{{ droplets.results }}"
       when: state == 'present'
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index 3e7093bb9ec689cf3ecb64182a9772b0357b97ba..7f2de0dd615a2c535d65c600133284e77115f054 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -14,39 +14,39 @@
 
     - name: replace_test_id
       set_fact:
-        test_name: "{{test_id |regex_replace('\\.', '-')}}"
+        test_name: "{{ test_id |regex_replace('\\.', '-') }}"
 
     - set_fact:
         instance_names: >-
           {%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale'] -%}
-          k8s-{{test_name}}-1,k8s-{{test_name}}-2,k8s-{{test_name}}-3
+          k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3
           {%- elif mode == 'aio' -%}
-          k8s-{{test_name}}-1
+          k8s-{{ test_name }}-1
           {%- else -%}
-          k8s-{{test_name}}-1,k8s-{{test_name}}-2
+          k8s-{{ test_name }}-1,k8s-{{ test_name }}-2
           {%- endif -%}
 
     - name: Create gce instances
       gce:
-        instance_names: "{{instance_names}}"
+        instance_names: "{{ instance_names }}"
         machine_type: "{{ cloud_machine_type }}"
         image: "{{ cloud_image | default(omit) }}"
         image_family: "{{ cloud_image_family | default(omit) }}"
         preemptible: "{{ preemptible }}"
         service_account_email: "{{ gce_service_account_email }}"
-        pem_file: "{{ gce_pem_file | default(omit)}}"
-        credentials_file: "{{gce_credentials_file | default(omit)}}"
+        pem_file: "{{ gce_pem_file | default(omit) }}"
+        credentials_file: "{{ gce_credentials_file | default(omit) }}"
         project_id: "{{ gce_project_id }}"
-        zone: "{{cloud_region}}"
-        metadata: '{"test_id": "{{test_id}}", "network": "{{kube_network_plugin}}", "startup-script": "{{startup_script|default("")}}"}'
-        tags: "build-{{test_name}},{{kube_network_plugin}}"
+        zone: "{{ cloud_region }}"
+        metadata: '{"test_id": "{{ test_id }}", "network": "{{ kube_network_plugin }}", "startup-script": "{{ startup_script|default("") }}"}'
+        tags: "build-{{ test_name }},{{ kube_network_plugin }}"
         ip_forward: yes
         service_account_permissions: ['compute-rw']
       register: gce
 
     - name: Add instances to host group
-      add_host: hostname={{item.public_ip}} groupname="waitfor_hosts"
-      with_items: '{{gce.instance_data}}'
+      add_host: hostname={{ item.public_ip }} groupname="waitfor_hosts"
+      with_items: '{{ gce.instance_data }}'
 
     - name: Template the inventory
       template:
diff --git a/tests/cloud_playbooks/delete-gce.yml b/tests/cloud_playbooks/delete-gce.yml
index 53d0164c1e8392074dc22b41dfc938a86e7cef43..a5b4a6e4d631fb66a5c71bf4cc10ba35f4cd9cee 100644
--- a/tests/cloud_playbooks/delete-gce.yml
+++ b/tests/cloud_playbooks/delete-gce.yml
@@ -8,25 +8,25 @@
   tasks:
     - name: replace_test_id
       set_fact:
-        test_name: "{{test_id |regex_replace('\\.', '-')}}"
+        test_name: "{{ test_id |regex_replace('\\.', '-') }}"
 
     - set_fact:
         instance_names: >-
           {%- if mode in ['separate', 'ha'] -%}
-          k8s-{{test_name}}-1,k8s-{{test_name}}-2,k8s-{{test_name}}-3
+          k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3
           {%- else -%}
-          k8s-{{test_name}}-1,k8s-{{test_name}}-2
+          k8s-{{ test_name }}-1,k8s-{{ test_name }}-2
           {%- endif -%}
 
     - name: stop gce instances
       gce:
-        instance_names: "{{instance_names}}"
+        instance_names: "{{ instance_names }}"
         image: "{{ cloud_image | default(omit) }}"
         service_account_email: "{{ gce_service_account_email }}"
-        pem_file: "{{ gce_pem_file | default(omit)}}"
-        credentials_file: "{{gce_credentials_file | default(omit)}}"
+        pem_file: "{{ gce_pem_file | default(omit) }}"
+        credentials_file: "{{ gce_credentials_file | default(omit) }}"
         project_id: "{{ gce_project_id }}"
-        zone: "{{cloud_region | default('europe-west1-b')}}"
+        zone: "{{ cloud_region | default('europe-west1-b') }}"
         state: 'stopped'
       async: 120
       poll: 3
@@ -35,13 +35,13 @@
 
     - name: delete gce instances
       gce:
-        instance_names: "{{instance_names}}"
+        instance_names: "{{ instance_names }}"
         image: "{{ cloud_image | default(omit) }}"
         service_account_email: "{{ gce_service_account_email }}"
-        pem_file: "{{ gce_pem_file | default(omit)}}"
-        credentials_file: "{{gce_credentials_file | default(omit)}}"
+        pem_file: "{{ gce_pem_file | default(omit) }}"
+        credentials_file: "{{ gce_credentials_file | default(omit) }}"
         project_id: "{{ gce_project_id }}"
-        zone: "{{cloud_region | default('europe-west1-b')}}"
+        zone: "{{ cloud_region | default('europe-west1-b') }}"
         state: 'absent'
       async: 120
       poll: 3
diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml
index d598d6044ba95fe047b9e7f1fe41eb75ec73ae27..39cec6f6a2dad3b639dedea8fc80e0e8b546b5b3 100644
--- a/tests/cloud_playbooks/upload-logs-gcs.yml
+++ b/tests/cloud_playbooks/upload-logs-gcs.yml
@@ -16,7 +16,7 @@
         test_name: "kargo-ci-{{ out.stdout_lines[0] }}"
 
     - set_fact:
-        file_name: "{{ostype}}-{{kube_network_plugin}}-{{commit}}-logs.tar.gz"
+        file_name: "{{ ostype }}-{{ kube_network_plugin }}-{{ commit }}-logs.tar.gz"
 
     - name: Create a bucket
       gc_storage:
@@ -30,31 +30,31 @@
     - name: Create a lifecycle template for the bucket
       template:
         src: gcs_life.json.j2
-        dest: "{{dir}}/gcs_life.json"
+        dest: "{{ dir }}/gcs_life.json"
 
     - name: Create a boto config to access GCS
       template:
         src: boto.j2
-        dest: "{{dir}}/.boto"
+        dest: "{{ dir }}/.boto"
       no_log: True
 
     - name: Download gsutil cp installer
       get_url:
         url: https://dl.google.com/dl/cloudsdk/channels/rapid/install_google_cloud_sdk.bash
-        dest: "{{dir}}/gcp-installer.sh"
+        dest: "{{ dir }}/gcp-installer.sh"
 
     - name: Get gsutil tool
-      script: "{{dir}}/gcp-installer.sh"
+      script: "{{ dir }}/gcp-installer.sh"
       environment:
         CLOUDSDK_CORE_DISABLE_PROMPTS: 1
-        CLOUDSDK_INSTALL_DIR: "{{dir}}"
+        CLOUDSDK_INSTALL_DIR: "{{ dir }}"
       no_log: True
       failed_when: false
 
     - name: Apply the lifecycle rules
-      command: "{{dir}}/google-cloud-sdk/bin/gsutil lifecycle set {{dir}}/gcs_life.json gs://{{test_name}}"
+      command: "{{ dir }}/google-cloud-sdk/bin/gsutil lifecycle set {{ dir }}/gcs_life.json gs://{{ test_name }}"
       environment:
-        BOTO_CONFIG: "{{dir}}/.boto"
+        BOTO_CONFIG: "{{ dir }}/.boto"
       no_log: True
 
     - name: Upload collected diagnostic info
@@ -63,13 +63,13 @@
         mode: put
         permission: public-read
         object: "{{ file_name }}"
-        src: "{{dir}}/logs.tar.gz"
+        src: "{{ dir }}/logs.tar.gz"
         headers: '{"Content-Encoding": "x-gzip"}'
         gs_access_key: "{{ gs_key }}"
         gs_secret_key: "{{ gs_skey }}"
-        expiration: "{{expire_days * 36000|int}}"
+        expiration: "{{ expire_days * 36000|int }}"
       failed_when: false
       no_log: True
 
     - debug:
-        msg: "A public url https://storage.googleapis.com/{{test_name}}/{{file_name}}"
+        msg: "A public url https://storage.googleapis.com/{{ test_name }}/{{ file_name }}"
diff --git a/tests/testcases/015_check-pods-running.yml b/tests/testcases/015_check-pods-running.yml
index c1e4a6629307d3b2581d0cc92d8d59460aa97413..28c5d80167c32a4b9ec9aabc336f7ba90a62de72 100644
--- a/tests/testcases/015_check-pods-running.yml
+++ b/tests/testcases/015_check-pods-running.yml
@@ -12,14 +12,14 @@
     when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
   - name: Check kubectl output
-    shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
+    shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
     register: get_pods
     no_log: true
 
-  - debug: msg="{{get_pods.stdout.split('\n')}}"
+  - debug: msg="{{ get_pods.stdout.split('\n') }}"
 
   - name: Check that all pods are running and ready
-    shell: "{{bin_dir}}/kubectl get pods --all-namespaces --no-headers -o yaml"
+    shell: "{{ bin_dir }}/kubectl get pods --all-namespaces --no-headers -o yaml"
     register: run_pods_log
     until:
     # Check that all pods are running
@@ -32,9 +32,9 @@
     no_log: true
 
   - name: Check kubectl output
-    shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
+    shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
     register: get_pods
     no_log: true
 
-  - debug: msg="{{get_pods.stdout.split('\n')}}"
+  - debug: msg="{{ get_pods.stdout.split('\n') }}"
     failed_when: not run_pods_log is success
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index 1aba38688a6e6152a3f5540331a4aa41c0f59dd2..a88df1052fa453ffe5324cddd7e550101244e229 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -15,13 +15,13 @@
     when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
   - name: Create test namespace
-    shell: "{{bin_dir}}/kubectl create namespace test"
+    shell: "{{ bin_dir }}/kubectl create namespace test"
 
   - name: Run a replica controller composed of 2 pods in test ns
-    shell: "{{bin_dir}}/kubectl run test --image={{test_image_repo}}:{{test_image_tag}} --namespace test --replicas=2 --command -- tail -f /dev/null"
+    shell: "{{ bin_dir }}/kubectl run test --image={{ test_image_repo }}:{{ test_image_tag }} --namespace test --replicas=2 --command -- tail -f /dev/null"
 
   - name: Check that all pods are running and ready
-    shell: "{{bin_dir}}/kubectl get pods --namespace test --no-headers -o yaml"
+    shell: "{{ bin_dir }}/kubectl get pods --namespace test --no-headers -o yaml"
     register: run_pods_log
     until:
     # Check that all pods are running
@@ -34,31 +34,31 @@
     no_log: true
 
   - name: Get pod names
-    shell: "{{bin_dir}}/kubectl get pods -n test -o json"
+    shell: "{{ bin_dir }}/kubectl get pods -n test -o json"
     register: pods
     no_log: true
 
-  - debug: msg="{{pods.stdout.split('\n')}}"
+  - debug: msg="{{ pods.stdout.split('\n') }}"
     failed_when: not run_pods_log is success
 
   - name: Get hostnet pods
-    command: "{{bin_dir}}/kubectl get pods -n test -o
+    command: "{{ bin_dir }}/kubectl get pods -n test -o
              jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
     register: hostnet_pods
     no_log: true
 
   - name: Get running pods
-    command: "{{bin_dir}}/kubectl get pods -n test -o
+    command: "{{ bin_dir }}/kubectl get pods -n test -o
              jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
     register: running_pods
     no_log: true
 
   - name: Check kubectl output
-    shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
+    shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
     register: get_pods
     no_log: true
 
-  - debug: msg="{{get_pods.stdout.split('\n')}}"
+  - debug: msg="{{ get_pods.stdout.split('\n') }}"
 
   - set_fact:
       kube_pods_subnet: 10.233.64.0/18
@@ -66,30 +66,30 @@
       pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute = 'status.podIP') | list }}"
       pods_hostnet: |
         {% set list = hostnet_pods.stdout.split(" ") %}
-        {{list}}
+        {{ list }}
       pods_running: |
         {% set list = running_pods.stdout.split(" ") %}
-        {{list}}
+        {{ list }}
 
   - name: Check pods IP are in correct network
     assert:
       that: item | ipaddr(kube_pods_subnet)
     when: not item in pods_hostnet and item in pods_running
-    with_items: "{{pod_ips}}"
+    with_items: "{{ pod_ips }}"
 
   - name: Ping between pods is working
-    shell: "{{bin_dir}}/kubectl -n test exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
+    shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
     when: not item[0] in pods_hostnet and not item[1] in pods_hostnet
     with_nested:
-    - "{{pod_names}}"
-    - "{{pod_ips}}"
+    - "{{ pod_names }}"
+    - "{{ pod_ips }}"
 
   - name: Ping between hostnet pods is working
-    shell: "{{bin_dir}}/kubectl -n test exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
+    shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
     when: item[0] in pods_hostnet and item[1] in pods_hostnet
     with_nested:
-    - "{{pod_names}}"
-    - "{{pod_ips}}"
+    - "{{ pod_names }}"
+    - "{{ pod_ips }}"
 
   - name: Delete test namespace
-    shell: "{{bin_dir}}/kubectl delete namespace test"
+    shell: "{{ bin_dir }}/kubectl delete namespace test"
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index 8b85760f880959526658a597be7c7ce745e317d1..c1264f8422a8080e85700f05b94a59ee74c0b9cf 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -24,8 +24,8 @@
       when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
     - name: Wait for netchecker server
-      shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{netcheck_namespace}} | grep ^netchecker-server"
-      delegate_to: "{{groups['kube-master'][0]}}"
+      shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep ^netchecker-server"
+      delegate_to: "{{ groups['kube-master'][0] }}"
       run_once: true
       register: ncs_pod
       until: ncs_pod.stdout.find('Running') != -1
@@ -33,18 +33,18 @@
       delay: 10
 
     - name: Wait for netchecker agents
-      shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{netcheck_namespace}} | grep '^netchecker-agent-.*Running'"
+      shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep '^netchecker-agent-.*Running'"
       run_once: true
-      delegate_to: "{{groups['kube-master'][0]}}"
+      delegate_to: "{{ groups['kube-master'][0] }}"
       register: nca_pod
       until: nca_pod.stdout_lines|length >= groups['k8s-cluster']|intersect(ansible_play_hosts)|length * 2
       retries: 3
       delay: 10
       failed_when: false
 
-    - command: "{{ bin_dir }}/kubectl -n {{netcheck_namespace}} describe pod -l app={{ item }}"
+    - command: "{{ bin_dir }}/kubectl -n {{ netcheck_namespace }} describe pod -l app={{ item }}"
       run_once: true
-      delegate_to: "{{groups['kube-master'][0]}}"
+      delegate_to: "{{ groups['kube-master'][0] }}"
       no_log: false
       with_items:
         - netchecker-agent
@@ -56,9 +56,9 @@
       run_once: true
 
     - name: Get netchecker agents
-      uri: url=http://{{ ansible_default_ipv4.address }}:{{netchecker_port}}/api/v1/agents/ return_content=yes
+      uri: url=http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/ return_content=yes
       run_once: true
-      delegate_to: "{{groups['kube-master'][0]}}"
+      delegate_to: "{{ groups['kube-master'][0] }}"
       register: agents
       retries: 18
       delay: "{{ agent_report_interval }}"
@@ -77,8 +77,8 @@
         - agents.content[0] == '{'
 
     - name: Check netchecker status
-      uri: url=http://{{ ansible_default_ipv4.address }}:{{netchecker_port}}/api/v1/connectivity_check status_code=200 return_content=yes
-      delegate_to: "{{groups['kube-master'][0]}}"
+      uri: url=http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check status_code=200 return_content=yes
+      delegate_to: "{{ groups['kube-master'][0] }}"
       run_once: true
       register: result
       retries: 3
@@ -97,13 +97,13 @@
     - command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy"
       run_once: true
       when: not result is success
-      delegate_to: "{{groups['kube-master'][0]}}"
+      delegate_to: "{{ groups['kube-master'][0] }}"
       no_log: false
 
-    - command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{item}} --all-containers"
+    - command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers"
       run_once: true
       when: not result is success
-      delegate_to: "{{groups['kube-master'][0]}}"
+      delegate_to: "{{ groups['kube-master'][0] }}"
       no_log: false
       with_items:
         - kube-router
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index 4cdbaeb72b09a202107fb5c5dd18b1984927b7e7..5ea8da37dd8a4438770d0b08efee7ba695775333 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -38,7 +38,7 @@
   pre_tasks:
     - name: gather facts from all instances
       setup:
-      delegate_to: "{{item}}"
+      delegate_to: "{{ item }}"
       delegate_facts: True
       with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
 
@@ -50,7 +50,7 @@
     - { role: kubernetes/preinstall, tags: preinstall }
     - { role: container-engine, tags: "container-engine", when: deploy_container_engine|default(true) }
     - { role: download, tags: download, when: "not skip_downloads" }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - hosts: etcd
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -76,7 +76,7 @@
     - { role: kubernetes/client, tags: client }
     - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - name: Upgrade calico on all masters and nodes
   hosts: kube-master:kube-node
@@ -98,7 +98,7 @@
     - { role: kubernetes/node, tags: node }
     - { role: kubernetes/kubeadm, tags: kubeadm }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - hosts: kube-master[0]
   any_errors_fatal: true
@@ -112,14 +112,14 @@
   roles:
     - { role: kubespray-defaults}
     - { role: network_plugin/calico/rr, tags: network }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes-apps, tags: apps }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"
 
 - hosts: k8s-cluster
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"