diff --git a/README.md b/README.md
index eb896255d52fdf90dd7c20f23797a8d1b437f0bd..0864d2ce90f630af9bf1e069cfa5071c54ce0564 100644
--- a/README.md
+++ b/README.md
@@ -67,12 +67,13 @@ plugins can be deployed for a given single cluster.
 Requirements
 --------------
 
+* **Ansible v2.2 (or newer) and python-netaddr are installed on the machine
+  that will run Ansible commands**
 * The target servers must have **access to the Internet** in order to pull docker images.
+* The target servers are configured to allow **IPv4 forwarding**.
+* **Your ssh key must be copied** to all the servers that are part of your inventory.
 * The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
 in order to avoid any issue during deployment you should disable your firewall.
-* The target servers are configured to allow **IPv4 forwarding**.
-* **Copy your ssh keys** to all the servers part of your inventory.
-* **Ansible v2.2 (or newer) and python-netaddr**
 
 
 ## Network plugins
diff --git a/docs/azure.md b/docs/azure.md
index 6b75f2fce1fa16d9b21e741ed15a67ac2dc0423d..4aeabde711b764568eb08ad453885dcd4de0d743 100644
--- a/docs/azure.md
+++ b/docs/azure.md
@@ -1,7 +1,7 @@
 Azure
 ===============
 
-To deploy kubespray on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
+To deploy Kubernetes on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
 
 All your instances are required to run in a resource group and a routing table has to be attached to the subnet your instances are in.
 
@@ -49,8 +49,8 @@ This is the AppId from the last command
 - Create the role assignment with:
 `azure role assignment create --spn http://kubernetes -o "Owner" -c /subscriptions/SUBSCRIPTION_ID`
 
-azure\_aad\_client\_id musst be set to the AppId, azure\_aad\_client\_secret is your choosen secret.
+azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
 
 ## Provisioning Azure with Resource Group Templates
 
-You'll find Resource Group Templates and scripts to provision the required infrastructore to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
\ No newline at end of file
+You'll find Resource Group Templates and scripts to provision the required infrastructure to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
diff --git a/inventory/group_vars/all.yml b/inventory/group_vars/all.yml
index 2e882af92b56ab9c9788878d75bacb399b8c5ef7..606f226be11c24ae72038b0c4f2612ec255fa49f 100644
--- a/inventory/group_vars/all.yml
+++ b/inventory/group_vars/all.yml
@@ -24,7 +24,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
 kube_users_dir: "{{ kube_config_dir }}/users"
 
 ## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.5.1
+kube_version: v1.5.3
 
 # Where the binaries will be downloaded.
 # Note: ensure that you've enough disk space (about 1G)
diff --git a/reset.yml b/reset.yml
index 9cf5047561d47eba7554d13b9f7f31b264e9ccd0..42a188cccdecb4c6aeab370b7e00dc020bbc9c5a 100644
--- a/reset.yml
+++ b/reset.yml
@@ -9,7 +9,8 @@
 
   pre_tasks:
     - name: check confirmation
-      fail: msg="Reset confirmation failed"
+      fail:
+        msg: "Reset confirmation failed"
       when: reset_confirmation != "yes"
 
   roles:
diff --git a/roles/adduser/tasks/main.yml b/roles/adduser/tasks/main.yml
index 394ff92945c99882977c63b4e668131b13189fb3..43ec8ebbb9a8ddf6654b149aaa6afd33c40dc016 100644
--- a/roles/adduser/tasks/main.yml
+++ b/roles/adduser/tasks/main.yml
@@ -1,6 +1,8 @@
 ---
 - name: User | Create User Group
-  group: name={{user.group|default(user.name)}} system={{user.system|default(omit)}}
+  group:
+    name: "{{user.group|default(user.name)}}"
+    system: "{{user.system|default(omit)}}"
 
 - name: User | Create User
   user:
diff --git a/roles/bastion-ssh-config/tasks/main.yml b/roles/bastion-ssh-config/tasks/main.yml
index d1aae5ca82a92069ec09eb6a06b492a361caf413..2d240a5608b3ffef084a08bda55ee5844d42cbb0 100644
--- a/roles/bastion-ssh-config/tasks/main.yml
+++ b/roles/bastion-ssh-config/tasks/main.yml
@@ -15,4 +15,6 @@
 
 - name: create ssh bastion conf
   become: false
-  template: src=ssh-bastion.conf dest="{{ playbook_dir }}/ssh-bastion.conf"
+  template:
+    src: ssh-bastion.conf
+    dest: "{{ playbook_dir }}/ssh-bastion.conf"
diff --git a/roles/bootstrap-os/tasks/bootstrap-centos.yml b/roles/bootstrap-os/tasks/bootstrap-centos.yml
index 9c41ae84c2a7b41e376976b6f31ed539d59a989c..b8cf126c16396657e396f02a0d062a9c55e7de8d 100644
--- a/roles/bootstrap-os/tasks/bootstrap-centos.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-centos.yml
@@ -1,7 +1,8 @@
 ---
 
 - name: Check presence of fastestmirror.conf
-  stat: path=/etc/yum/pluginconf.d/fastestmirror.conf
+  stat:
+    path: /etc/yum/pluginconf.d/fastestmirror.conf
   register: fastestmirror
 
 # fastestmirror plugin actually slows down Ansible deployments
diff --git a/roles/bootstrap-os/tasks/bootstrap-coreos.yml b/roles/bootstrap-os/tasks/bootstrap-coreos.yml
index 9ef440e5996b8026987eb11df69cc8ae125dfea6..b806d9f6dd2cb3343ee0084a834fb31e7e65c0ee 100644
--- a/roles/bootstrap-os/tasks/bootstrap-coreos.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-coreos.yml
@@ -23,7 +23,9 @@
   tags: facts
 
 - name: Bootstrap | Copy get-pip.py
-  copy: src=get-pip.py dest=~/get-pip.py
+  copy:
+    src: get-pip.py
+    dest: ~/get-pip.py
   when: (need_pip | failed)
 
 - name: Bootstrap | Install pip
@@ -31,11 +33,16 @@
   when: (need_pip | failed)
 
 - name: Bootstrap | Remove get-pip.py
-  file: path=~/get-pip.py state=absent
+  file:
+    path: ~/get-pip.py
+    state: absent
   when: (need_pip | failed)
 
 - name: Bootstrap | Install pip launcher
-  copy: src=runner dest=/opt/bin/pip mode=0755
+  copy:
+    src: runner
+    dest: /opt/bin/pip
+    mode: 0755
   when: (need_pip | failed)
 
 - name: Install required python modules
diff --git a/roles/bootstrap-os/tasks/setup-pipelining.yml b/roles/bootstrap-os/tasks/setup-pipelining.yml
index ca216cc3baf2bdf0072e657b4ec8fcce224e89e5..7143f260efd28dd92b3a0bc0790eed8d24dcf14e 100644
--- a/roles/bootstrap-os/tasks/setup-pipelining.yml
+++ b/roles/bootstrap-os/tasks/setup-pipelining.yml
@@ -2,5 +2,8 @@
 # Remove requiretty to make ssh pipelining work
 
 - name: Remove require tty
-  lineinfile: regexp="^\w+\s+requiretty" dest=/etc/sudoers state=absent
+  lineinfile:
+    regexp: '^\w+\s+requiretty'
+    dest: /etc/sudoers
+    state: absent
 
diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml
index d7e65fe96da7c1ed4b1819ca18770bad2bbb150f..f8654a2623d87723e7cd45f4778e1b8800625e26 100644
--- a/roles/dnsmasq/tasks/main.yml
+++ b/roles/dnsmasq/tasks/main.yml
@@ -34,7 +34,8 @@
   register: dnsmasq_config
 
 - name: Stat dnsmasq configuration
-  stat: path=/etc/dnsmasq.d/01-kube-dns.conf
+  stat:
+    path: /etc/dnsmasq.d/01-kube-dns.conf
   register: sym
 
 - name: Move previous configuration
@@ -49,7 +50,9 @@
     state: link
 
 - name: Create dnsmasq manifests
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
+  template:
+    src: "{{item.file}}"
+    dest: "{{kube_config_dir}}/{{item.file}}"
   with_items:
     - {file: dnsmasq-ds.yml, type: ds}
     - {file: dnsmasq-svc.yml, type: svc}
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index e92bf3a51d4d6eae86d835b45bf7f90b809ae283..90d7aacb8f41ca99743c7074bab3f24180c0670b 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -23,7 +23,9 @@
     state: restarted
 
 - name: Docker | pause while Docker restarts
-  pause: seconds=10 prompt="Waiting for docker restart"
+  pause:
+    seconds: 10
+    prompt: "Waiting for docker restart"
 
 - name: Docker | wait for docker
   command: "{{ docker_bin_dir }}/docker images"
diff --git a/roles/docker/tasks/set_facts_dns.yml b/roles/docker/tasks/set_facts_dns.yml
index 2f629802f7e930c30597e8b4c51a0557d75703aa..f17c1bde24435b13b36cacfabf43eae0a0dadbf9 100644
--- a/roles/docker/tasks/set_facts_dns.yml
+++ b/roles/docker/tasks/set_facts_dns.yml
@@ -51,13 +51,16 @@
   when: system_search_domains.stdout != "" 
 
 - name: check number of nameservers
-  fail: msg="Too many nameservers"
+  fail:
+    msg: "Too many nameservers"
   when: docker_dns_servers|length > 3
 
 - name: check number of search domains
-  fail: msg="Too many search domains"
+  fail:
+    msg: "Too many search domains"
   when: docker_dns_search_domains|length > 6
 
 - name: check length of search domains
-  fail: msg="Search domains exceeded limit of 256 characters"
+  fail:
+    msg: "Search domains exceeded limit of 256 characters"
   when: docker_dns_search_domains|join(' ')|length > 256
diff --git a/roles/docker/tasks/systemd.yml b/roles/docker/tasks/systemd.yml
index a107ab462e38d5c5f7956c4db66e5fb7b0541dba..18710ac4983e280a5a16b655eab87afdc97a09bb 100644
--- a/roles/docker/tasks/systemd.yml
+++ b/roles/docker/tasks/systemd.yml
@@ -1,6 +1,8 @@
 ---
 - name: Create docker service systemd directory if it doesn't exist
-  file: path=/etc/systemd/system/docker.service.d state=directory
+  file:
+    path: /etc/systemd/system/docker.service.d
+    state: directory
 
 - name: Write docker proxy drop-in
   template:
diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml
index 7b49f4f0ea6a02248837f8957b2c524112db0b16..b4eb790534e8f8fe2becb8f94bb49be7a485f254 100644
--- a/roles/download/tasks/main.yml
+++ b/roles/download/tasks/main.yml
@@ -5,7 +5,10 @@
   when: "{{ download.enabled|bool and not download.container|bool }}"
 
 - name: Create dest directories
-  file: path={{local_release_dir}}/{{download.dest|dirname}} state=directory recurse=yes
+  file:
+    path: "{{local_release_dir}}/{{download.dest|dirname}}"
+    state: directory
+    recurse: yes
   when: "{{ download.enabled|bool and not download.container|bool }}"
   tags: bootstrap-os
 
@@ -44,7 +47,12 @@
   tags: facts
 
 - name: Create dest directory for saved/loaded container images
-  file: path="{{local_release_dir}}/containers" state=directory recurse=yes mode=0755 owner={{ansible_ssh_user|default(ansible_user_id)}}
+  file:
+    path: "{{local_release_dir}}/containers"
+    state: directory
+    recurse: yes
+    mode: 0755
+    owner: "{{ansible_ssh_user|default(ansible_user_id)}}"
   when: "{{ download.enabled|bool and download.container|bool }}"
   tags: bootstrap-os
 
@@ -58,7 +66,10 @@
   tags: localhost
 
 - name: Download | create local directory for saved/loaded container images
-  file: path="{{local_release_dir}}/containers" state=directory recurse=yes
+  file:
+    path: "{{local_release_dir}}/containers"
+    state: directory
+    recurse: yes
   delegate_to: localhost
   become: false
   run_once: true
@@ -105,7 +116,8 @@
   tags: facts
 
 - name: Stat saved container image
-  stat: path="{{fname}}"
+  stat:
+    path: "{{fname}}"
   register: img
   changed_when: false
   when: "{{ download.enabled|bool and download.container|bool and download_run_once|bool }}"
diff --git a/roles/download/tasks/set_docker_image_facts.yml b/roles/download/tasks/set_docker_image_facts.yml
index 33d6d471e02f045e9d02489af80adc1a8b0eea82..0efda4d091ad8ea0ce9bda57278d0d3c99aadcbc 100644
--- a/roles/download/tasks/set_docker_image_facts.yml
+++ b/roles/download/tasks/set_docker_image_facts.yml
@@ -16,7 +16,8 @@
   check_mode: no
   when: not download_always_pull|bool
 
-- set_fact: docker_images="{{docker_images_raw.stdout|regex_replace('\[|\]|\\n]','')|regex_replace('\s',',')}}"
+- set_fact:
+    docker_images: "{{docker_images_raw.stdout|regex_replace('\\[|\\]|\\n]','')|regex_replace('\\s',',')}}"
   no_log: true
   when: not download_always_pull|bool
 
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
index 8c790e9cdbd5436611a9d8953620289d4ee38067..56dd9f4318b47d1865fd2a0a55e5bfa1def527a7 100644
--- a/roles/etcd/handlers/main.yml
+++ b/roles/etcd/handlers/main.yml
@@ -16,7 +16,9 @@
   when: is_etcd_master
 
 - name: wait for etcd up
-  uri: url="https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health" validate_certs=no
+  uri:
+    url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
+    validate_certs: no
   register: result
   until: result.status is defined and result.status == 200
   retries: 10
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index f95ec97ca7a8e09c618058a34c51c3bfa0f4567c..bb6d55660b9da0de29d8922f01a3a67750fdd1dc 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -1,11 +1,11 @@
 ---
 - name: Gen_certs | create etcd cert dir
   file:
-    path={{ etcd_cert_dir }}
-    group={{ etcd_cert_group }}
-    state=directory
-    owner=root
-    recurse=yes
+    path: "{{ etcd_cert_dir }}"
+    group: "{{ etcd_cert_group }}"
+    state: directory
+    owner: root
+    recurse: yes
 
 - name: "Gen_certs | create etcd script dir (on {{groups['etcd'][0]}})"
   file:
@@ -17,11 +17,11 @@
 
 - name: "Gen_certs | create etcd cert dir (on {{groups['etcd'][0]}})"
   file:
-    path={{ etcd_cert_dir }}
-    group={{ etcd_cert_group }}
-    state=directory
-    owner=root
-    recurse=yes
+    path: "{{ etcd_cert_dir }}"
+    group: "{{ etcd_cert_group }}"
+    state: directory
+    owner: root
+    recurse: yes
   run_once: yes
   delegate_to: "{{groups['etcd'][0]}}"
 
@@ -126,11 +126,11 @@
 
 - name: Gen_certs | check certificate permissions
   file:
-    path={{ etcd_cert_dir }}
-    group={{ etcd_cert_group }}
-    state=directory
-    owner=kube
-    recurse=yes
+    path: "{{ etcd_cert_dir }}"
+    group: "{{ etcd_cert_group }}"
+    state: directory
+    owner: kube
+    recurse: yes
 
 - name: Gen_certs | set permissions on keys
   shell: chmod 0600 {{ etcd_cert_dir}}/*key.pem
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 6e952cd33dc0bde9b0691ba5ae9a1823c398e228..1af265736755e22b307b0b6b633ba1a589cb069f 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -5,6 +5,7 @@
 - include: check_certs.yml
   when: cert_management == "script"
   tags: [etcd-secrets, facts]
+
 - include: gen_certs_script.yml
   when: cert_management == "script"
   tags: etcd-secrets
@@ -12,9 +13,11 @@
 - include: sync_etcd_master_certs.yml
   when: cert_management == "vault" and inventory_hostname in groups.etcd
   tags: etcd-secrets
+
 - include: sync_etcd_node_certs.yml
   when: cert_management == "vault" and inventory_hostname in etcd_node_cert_hosts 
   tags: etcd-secrets
+
 - include: gen_certs_vault.yml
   when: cert_management == "vault" and (etcd_master_certs_needed|d() or etcd_node_certs_needed|d())
   tags: etcd-secrets
@@ -22,10 +25,13 @@
 - include: "install_{{ etcd_deployment_type }}.yml"
   when: is_etcd_master
   tags: upgrade
+
 - include: set_cluster_health.yml
   when: is_etcd_master
+
 - include: configure.yml
   when: is_etcd_master
+
 - include: refresh_config.yml
   when: is_etcd_master
 
@@ -50,5 +56,6 @@
 # state insted of `new`.
 - include: set_cluster_health.yml
   when: is_etcd_master
+
 - include: refresh_config.yml
   when: is_etcd_master
diff --git a/roles/etcd/tasks/pre_upgrade.yml b/roles/etcd/tasks/pre_upgrade.yml
index eb17e987114624d1baea59b73948a6fbde66b85f..d498a03364028aa4b23fc1bf07a1daf4eb20193a 100644
--- a/roles/etcd/tasks/pre_upgrade.yml
+++ b/roles/etcd/tasks/pre_upgrade.yml
@@ -34,6 +34,11 @@
   command: "{{ docker_bin_dir }}/docker rm -f {{item}}"
   with_items: "{{etcd_proxy_container.stdout_lines}}"
 
+- name: "Pre-upgrade | see if etcdctl is installed"
+  stat:
+    path: "{{ bin_dir }}/etcdctl"
+  register: etcdctl_installed
+
 - name: "Pre-upgrade | check if member list is non-SSL"
   command: "{{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses | regex_replace('https','http') }} member list"
   register: etcd_member_list
@@ -41,6 +46,7 @@
   delay: 3
   until: etcd_member_list.rc != 2
   run_once: true
+  when: etcdctl_installed.stat.exists
   failed_when: false
 
 - name: "Pre-upgrade | change peer names to SSL"
@@ -48,4 +54,4 @@
     {{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses | regex_replace('https','http') }} member list |
     awk -F"[: =]" '{print "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses | regex_replace('https','http') }} member update "$1" https:"$7":"$8}' | bash
   run_once: true
-  when: 'etcd_member_list.rc == 0 and "http://" in etcd_member_list.stdout'
+  when: 'etcdctl_installed.stat.exists and etcd_member_list.rc == 0 and "http://" in etcd_member_list.stdout'
diff --git a/roles/kernel-upgrade/tasks/centos-7.yml b/roles/kernel-upgrade/tasks/centos-7.yml
index b3181c2132de069ad7d21baeebaaf5c04d6d9937..a9de6b56f4a7cffd9169ade6578cda5e3d017eaf 100644
--- a/roles/kernel-upgrade/tasks/centos-7.yml
+++ b/roles/kernel-upgrade/tasks/centos-7.yml
@@ -1,7 +1,9 @@
 ---
 
 - name: install ELRepo key
-  rpm_key: state=present key='{{ elrepo_key_url }}'
+  rpm_key:
+    state: present
+    key: '{{ elrepo_key_url }}'
 
 - name: install elrepo repository
   yum:
@@ -9,7 +11,10 @@
     state: present
 
 - name: upgrade kernel
-  yum: name={{elrepo_kernel_package}} state=present enablerepo=elrepo-kernel
+  yum:
+    name: "{{elrepo_kernel_package}}"
+    state: present
+    enablerepo: elrepo-kernel
   register: upgrade
 
 - name: change default grub entry
diff --git a/roles/kernel-upgrade/tasks/reboot.yml b/roles/kernel-upgrade/tasks/reboot.yml
index 51c3833868b04fe12ead626d0886cff6c1a51216..5e01dd8fcf9297acb6f84f60388030001775a2bc 100644
--- a/roles/kernel-upgrade/tasks/reboot.yml
+++ b/roles/kernel-upgrade/tasks/reboot.yml
@@ -8,23 +8,33 @@
   shell: nohup bash -c "sleep 5 && shutdown -r now 'Reboot required for updated kernel'" &
 
 - name: Wait for some seconds
-  pause: seconds=10
+  pause:
+    seconds: 10
 
 - set_fact:
     is_bastion: "{{ inventory_hostname == 'bastion' }}"
     wait_for_delegate: "localhost"
+
 - set_fact:
     wait_for_delegate: "{{hostvars['bastion']['ansible_ssh_host']}}"
   when: "{{ 'bastion' in groups['all'] }}"
 
 - name: wait for bastion to come back
-  wait_for: host={{ ansible_ssh_host }} port=22 delay=10 timeout=300
+  wait_for:
+    host: "{{ ansible_ssh_host }}"
+    port: 22
+    delay: 10
+    timeout: 300
   become: false
   delegate_to: localhost
   when: "is_bastion"
 
 - name: waiting for server to come back (using bastion if necessary)
-  wait_for: host={{ ansible_ssh_host }} port=22 delay=10 timeout=300
+  wait_for:
+    host: "{{ ansible_ssh_host }}"
+    port: 22
+    delay: 10
+    timeout: 300
   become: false
   delegate_to: "{{ wait_for_delegate }}"
   when: "not is_bastion"
diff --git a/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml b/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml
index 447fb719f89f2eda1a9d1db72c34fe3a4661d6c8..c6a6bd94da4e6d0ee47bfbc5785c65f2dc1ff6b8 100644
--- a/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml
+++ b/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml
@@ -5,7 +5,9 @@
   tags: facts
 
 - name: Write calico-policy-controller yaml
-  template: src=calico-policy-controller.yml.j2 dest={{kube_config_dir}}/calico-policy-controller.yml
+  template:
+    src: calico-policy-controller.yml.j2
+    dest: "{{kube_config_dir}}/calico-policy-controller.yml"
   when: inventory_hostname == groups['kube-master'][0]
 
 - name: Start of Calico policy controller
diff --git a/roles/kubernetes-apps/ansible/tasks/main.yaml b/roles/kubernetes-apps/ansible/tasks/main.yml
similarity index 90%
rename from roles/kubernetes-apps/ansible/tasks/main.yaml
rename to roles/kubernetes-apps/ansible/tasks/main.yml
index 787fa156a2f015f6e5d9b837e8a117c3030fe5f0..04554e785d7ebc3c0ec5771aff0cdfa6e715e788 100644
--- a/roles/kubernetes-apps/ansible/tasks/main.yaml
+++ b/roles/kubernetes-apps/ansible/tasks/main.yml
@@ -1,6 +1,7 @@
 ---
 - name: Kubernetes Apps | Wait for kube-apiserver
-  uri: url=http://localhost:8080/healthz
+  uri:
+    url: http://localhost:8080/healthz
   register: result
   until: result.status == 200
   retries: 10
@@ -8,7 +9,9 @@
   when: inventory_hostname == groups['kube-master'][0]
 
 - name: Kubernetes Apps | Lay Down KubeDNS Template
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
+  template:
+    src: "{{item.file}}"
+    dest: "{{kube_config_dir}}/{{item.file}}"
   with_items:
     - {file: kubedns-rc.yml, type: rc}
     - {file: kubedns-svc.yml, type: svc}
diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
index 0413e4bb694c175d194285639f6ea51e0c450d3a..6319d1c1c20cf46fc82c99e41182efd7700f2ace 100644
--- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml
+++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
@@ -1,5 +1,7 @@
 - name: Kubernetes Apps | Lay Down Netchecker Template
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
+  template:
+    src: "{{item.file}}"
+    dest: "{{kube_config_dir}}/{{item.file}}"
   with_items:
     - {file: netchecker-agent-ds.yml, type: ds, name: netchecker-agent}
     - {file: netchecker-agent-hostnet-ds.yml, type: ds, name: netchecker-agent-hostnet}
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index 9fd691ddd92887b9d52a5961335c1219ae2caa10..edf2509d50143d0b027f31d919f5e60008a4f344 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -1,2 +1,3 @@
 ---
-- debug: msg="No helm charts"
+- debug:
+    msg: "No helm charts"
diff --git a/roles/kubernetes-apps/kpm/tasks/main.yaml b/roles/kubernetes-apps/kpm/tasks/main.yml
similarity index 100%
rename from roles/kubernetes-apps/kpm/tasks/main.yaml
rename to roles/kubernetes-apps/kpm/tasks/main.yml
diff --git a/roles/kubernetes-apps/meta/main.yaml b/roles/kubernetes-apps/meta/main.yml
similarity index 100%
rename from roles/kubernetes-apps/meta/main.yaml
rename to roles/kubernetes-apps/meta/main.yml
diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yaml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
similarity index 100%
rename from roles/kubernetes-apps/network_plugin/canal/tasks/main.yaml
rename to roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml
index 3854822765df1ceedf23dc87e99e406c41a1a0cb..38edeeb1ffa818e0a18012f273cd10668e81ed6d 100644
--- a/roles/kubernetes/master/handlers/main.yml
+++ b/roles/kubernetes/master/handlers/main.yml
@@ -22,21 +22,24 @@
     state: restarted
 
 - name: Master | wait for kube-scheduler
-  uri: url=http://localhost:10251/healthz
+  uri:
+    url: http://localhost:10251/healthz
   register: scheduler_result
   until: scheduler_result.status == 200
   retries: 15
   delay: 5
 
 - name: Master | wait for kube-controller-manager
-  uri: url=http://localhost:10252/healthz
+  uri:
+    url: http://localhost:10252/healthz
   register: controller_manager_result
   until: controller_manager_result.status == 200
   retries: 15
   delay: 5
 
 - name: Master | wait for the apiserver to be running
-  uri: url=http://localhost:8080/healthz
+  uri:
+    url: http://localhost:8080/healthz
   register: result
   until: result.status == 200
   retries: 10
diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml
index a622594a182424202bd2e1be6640298ad2f5803f..67a64d4a60a58ed8dd545606a3f6292e1996ea7d 100644
--- a/roles/kubernetes/master/tasks/main.yml
+++ b/roles/kubernetes/master/tasks/main.yml
@@ -36,7 +36,9 @@
 - meta: flush_handlers
 
 - name: copy kube system namespace manifest
-  copy: src=namespace.yml dest={{kube_config_dir}}/{{system_namespace}}-ns.yml
+  copy:
+    src: namespace.yml
+    dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
   run_once: yes
   when: inventory_hostname == groups['kube-master'][0]
   tags: apps
diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml
index 8c6bf7bb112fc10ca47409751ce8fa6aae099211..1bb0c0344809ee0259a75805400763836d1b334b 100644
--- a/roles/kubernetes/master/tasks/pre-upgrade.yml
+++ b/roles/kubernetes/master/tasks/pre-upgrade.yml
@@ -43,7 +43,8 @@
   when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists
 
 - name: "Pre-upgrade | Pause while waiting for kubelet to delete kube-apiserver pod"
-  pause: seconds=20
+  pause:
+    seconds: 20
   when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists
   tags: kube-apiserver
 
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 2c18937c9a464f4d62b038885f157eb3e9bf263b..5b7453132d9a0a3791cdc242b2e9be2f369130e1 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -12,12 +12,18 @@
   tags: nginx
 
 - name: Write kubelet config file
-  template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet.env backup=yes
+  template:
+    src: kubelet.j2
+    dest: "{{ kube_config_dir }}/kubelet.env"
+    backup: yes
   notify: restart kubelet
   tags: kubelet
 
 - name: write the kubecfg (auth) file for kubelet
-  template: src=node-kubeconfig.yaml.j2 dest={{ kube_config_dir }}/node-kubeconfig.yaml backup=yes
+  template:
+    src: node-kubeconfig.yaml.j2
+    dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
+    backup: yes
   notify: restart kubelet
   tags: kubelet
 
diff --git a/roles/kubernetes/node/tasks/nginx-proxy.yml b/roles/kubernetes/node/tasks/nginx-proxy.yml
index 885b84f8f000a23c10217c68c3436b385f1394df..36cb32592c0d5e95be65fd74ab6922a88e3d9be1 100644
--- a/roles/kubernetes/node/tasks/nginx-proxy.yml
+++ b/roles/kubernetes/node/tasks/nginx-proxy.yml
@@ -1,9 +1,20 @@
 ---
 - name: nginx-proxy | Write static pod
-  template: src=manifests/nginx-proxy.manifest.j2 dest={{kube_manifest_dir}}/nginx-proxy.yml
+  template:
+    src: manifests/nginx-proxy.manifest.j2
+    dest: "{{kube_manifest_dir}}/nginx-proxy.yml"
 
 - name: nginx-proxy | Make nginx directory
-  file: path=/etc/nginx state=directory mode=0700 owner=root
+  file:
+    path: /etc/nginx
+    state: directory
+    mode: 0700
+    owner: root
 
 - name: nginx-proxy | Write nginx-proxy configuration
-  template: src=nginx.conf.j2 dest="/etc/nginx/nginx.conf" owner=root mode=0755 backup=yes
+  template:
+    src: nginx.conf.j2
+    dest: "/etc/nginx/nginx.conf"
+    owner: root
+    mode: 0755
+    backup: yes
diff --git a/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml b/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml
index f233f4c1d57bc95cd7109c9c1c65ad1de37ed2f8..10e5bba68966c79c4fe58118820b7dc0c64369c2 100644
--- a/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml
+++ b/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml
@@ -14,7 +14,9 @@
   notify: Preinstall | restart network
 
 - name: Remove kargo specific dhclient hook
-  file: path="{{ dhclienthookfile }}" state=absent
+  file:
+    path: "{{ dhclienthookfile }}"
+    state: absent
   when: dhclienthookfile is defined
   notify: Preinstall | restart network
 
diff --git a/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml b/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml
index afd5ff229dbb63688468fdb611c45363a28b5063..2df6962e826b5b00f64aa6a8e73b0ff7365fbefd 100644
--- a/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml
+++ b/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml
@@ -3,7 +3,9 @@
 # Running growpart seems to be only required on Azure, as other Cloud Providers do this at boot time
 
 - name: install growpart
-  package: name=cloud-utils-growpart state=latest
+  package:
+    name: cloud-utils-growpart
+    state: latest
 
 - name: check if growpart needs to be run
   command: growpart -N /dev/sda 1
diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml
index f8b4987d66c3e938358e7b7cfb0ca0074d5e6145..5b79c101d0ea9f5ccc1667502ec469147d2ed824 100644
--- a/roles/kubernetes/preinstall/tasks/main.yml
+++ b/roles/kubernetes/preinstall/tasks/main.yml
@@ -88,12 +88,18 @@
   tags: [network, calico, weave, canal, bootstrap-os]
 
 - name: Update package management cache (YUM)
-  yum: update_cache=yes name='*'
+  yum:
+    update_cache: yes
+    name: '*'
   when: ansible_pkg_mgr == 'yum'
   tags: bootstrap-os
 
 - name: Install latest version of python-apt for Debian distribs
-  apt: name=python-apt state=latest update_cache=yes cache_valid_time=3600
+  apt:
+    name: python-apt
+    state: latest
+    update_cache: yes
+    cache_valid_time: 3600
   when: ansible_os_family == "Debian"
   tags: bootstrap-os
 
@@ -125,9 +131,17 @@
   tags: bootstrap-os
 
 # Todo : selinux configuration
-- name: Set selinux policy to permissive
-  selinux: policy=targeted state=permissive
+- name: Confirm selinux deployed
+  stat:
+    path: /etc/selinux/config
   when: ansible_os_family == "RedHat"
+  register: slc
+
+- name: Set selinux policy to permissive
+  selinux:
+    policy: targeted
+    state: permissive
+  when: ansible_os_family == "RedHat" and slc.stat.exists == True
   changed_when: False
   tags: bootstrap-os
 
@@ -146,7 +160,8 @@
   tags: bootstrap-os
 
 - name: Stat sysctl file configuration
-  stat: path={{sysctl_file_path}}
+  stat:
+    path: "{{sysctl_file_path}}"
   register: sysctl_file_stat
   tags: bootstrap-os
 
@@ -198,7 +213,8 @@
   tags: [bootstrap-os, resolvconf]
 
 - name: Check if we are running inside a Azure VM
-  stat: path=/var/lib/waagent/
+  stat:
+    path: /var/lib/waagent/
   register: azure_check
   tags: bootstrap-os
 
diff --git a/roles/kubernetes/preinstall/tasks/set_facts.yml b/roles/kubernetes/preinstall/tasks/set_facts.yml
index 456467a97cf1f9415e035af2280cdf1f7b25647f..214aecceff0937b03f6520dc25e3e94923f131d1 100644
--- a/roles/kubernetes/preinstall/tasks/set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/set_facts.yml
@@ -1,12 +1,23 @@
 ---
-- set_fact: kube_apiserver_count="{{ groups['kube-master'] | length }}"
-- set_fact: kube_apiserver_address="{{ ip | default(ansible_default_ipv4['address']) }}"
-- set_fact: kube_apiserver_access_address="{{ access_ip | default(kube_apiserver_address) }}"
-- set_fact: is_kube_master="{{ inventory_hostname in groups['kube-master'] }}"
-- set_fact: first_kube_master="{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
+- set_fact:
+    kube_apiserver_count: "{{ groups['kube-master'] | length }}"
+
+- set_fact:
+    kube_apiserver_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
+
+- set_fact:
+    kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
+
+- set_fact:
+    is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
+
+- set_fact:
+    first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
+
 - set_fact:
     loadbalancer_apiserver_localhost: false
   when: loadbalancer_apiserver is defined
+
 - set_fact:
     kube_apiserver_endpoint: |-
       {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
@@ -21,34 +32,54 @@
       {%-  endif -%}
       {%- endif %}
 
-- set_fact: etcd_address="{{ ip | default(ansible_default_ipv4['address']) }}"
-- set_fact: etcd_access_address="{{ access_ip | default(etcd_address) }}"
-- set_fact: etcd_peer_url="https://{{ etcd_access_address }}:2380"
-- set_fact: etcd_client_url="https://{{ etcd_access_address }}:2379"
-- set_fact: etcd_authority="127.0.0.1:2379"
-- set_fact: etcd_endpoint="https://{{ etcd_authority }}"
+- set_fact:
+    etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
+
+- set_fact:
+    etcd_access_address: "{{ access_ip | default(etcd_address) }}"
+
+- set_fact:
+    etcd_peer_url: "https://{{ etcd_access_address }}:2380"
+
+- set_fact:
+    etcd_client_url: "https://{{ etcd_access_address }}:2379"
+
+- set_fact:
+    etcd_authority: "127.0.0.1:2379"
+
+- set_fact:
+    etcd_endpoint: "https://{{ etcd_authority }}"
+
 - set_fact:
     etcd_access_addresses: |-
       {% for item in groups['etcd'] -%}
         https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}
       {%- endfor %}
-- set_fact: etcd_access_endpoint="{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
+
+- set_fact:
+    etcd_access_endpoint: "{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
+
 - set_fact:
     etcd_member_name: |-
       {% for host in groups['etcd'] %}
       {%   if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %}
       {% endfor %}
+
 - set_fact:
     etcd_peer_addresses: |-
       {% for item in groups['etcd'] -%}
         {{ "etcd"+loop.index|string }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
       {%- endfor %}
+
 - set_fact:
     is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
+
 - set_fact:
     etcd_after_v3: etcd_version | version_compare("v3.0.0", ">=")
+
 - set_fact:
     etcd_container_bin_dir: "{% if etcd_after_v3 %}/usr/local/bin/{% else %}/{% endif %}"
+
 - set_fact:
     peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"
 
diff --git a/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml b/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml
index c2413e89f53496b74a913880781b801c99e5a29a..ffea74b40b9db1c8727cd795bffc95fe3340ae20 100644
--- a/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml
@@ -39,11 +39,13 @@
   when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
 - name: target temporary resolvconf cloud init file (Container Linux by CoreOS)
-  set_fact: resolvconffile=/tmp/resolveconf_cloud_init_conf
+  set_fact:
+    resolvconffile: /tmp/resolveconf_cloud_init_conf
   when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
 - name: check if /etc/dhclient.conf exists
-  stat: path=/etc/dhclient.conf
+  stat:
+    path: /etc/dhclient.conf
   register: dhclient_stat
 
 - name: target dhclient conf file for /etc/dhclient.conf
@@ -52,7 +54,8 @@
   when: dhclient_stat.stat.exists
 
 - name: check if /etc/dhcp/dhclient.conf exists
-  stat: path=/etc/dhcp/dhclient.conf
+  stat:
+    path: /etc/dhcp/dhclient.conf
   register: dhcp_dhclient_stat
 
 - name: target dhclient conf file for /etc/dhcp/dhclient.conf
diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml
index cd742d6479fab6927b9a43a649bac749a057bd5a..f75a45d1ae31810dc75fd2a348caf35e898a454a 100644
--- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml
+++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml
@@ -146,10 +146,10 @@
 
 - name: Gen_certs | check certificate permissions
   file:
-    path={{ kube_cert_dir }}
-    group={{ kube_cert_group }}
-    owner=kube
-    recurse=yes
+    path: "{{ kube_cert_dir }}"
+    group: "{{ kube_cert_group }}"
+    owner: kube
+    recurse: yes
 
 - name: Gen_certs | set permissions on keys
   shell: chmod 0600 {{ kube_cert_dir}}/*key.pem
diff --git a/roles/kubernetes/secrets/tasks/main.yml b/roles/kubernetes/secrets/tasks/main.yml
index f442b62b3b320f10efc8e7414f3c33cee85ea26c..ab2cb76b2c0a5dcfd83e752e1d24a84b2827618e 100644
--- a/roles/kubernetes/secrets/tasks/main.yml
+++ b/roles/kubernetes/secrets/tasks/main.yml
@@ -1,29 +1,30 @@
 ---
 - include: check-certs.yml
   tags: [k8s-secrets, facts]
+
 - include: check-tokens.yml
   tags: [k8s-secrets, facts]
 
 - name: Make sure the certificate directory exits
   file:
-    path={{ kube_cert_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_cert_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
 
 - name: Make sure the tokens directory exits
   file:
-    path={{ kube_token_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_token_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
 
 - name: Make sure the users directory exits
   file:
-    path={{ kube_users_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_users_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
 
 - name: Populate users for basic auth in API
   lineinfile:
@@ -62,10 +63,10 @@
 
 - name: "Get_tokens | Make sure the tokens directory exits (on {{groups['kube-master'][0]}})"
   file:
-    path={{ kube_token_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_token_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
   run_once: yes
   delegate_to: "{{groups['kube-master'][0]}}"
   when: gen_tokens|default(false)
@@ -77,9 +78,11 @@
 - include: sync_kube_master_certs.yml
   when: cert_management == "vault" and inventory_hostname in groups['kube-master']
   tags: k8s-secrets
+
 - include: sync_kube_node_certs.yml
   when: cert_management == "vault" and inventory_hostname in groups['k8s-cluster']
   tags: k8s-secrets
+
 - include: gen_certs_vault.yml
   when: cert_management == "vault"
   tags: k8s-secrets
diff --git a/roles/kubernetes/secrets/templates/openssl.conf.j2 b/roles/kubernetes/secrets/templates/openssl.conf.j2
index ac94b6800ca0fbc6c69c7e996a07404a3d903a53..d3164286ea80959cc6a4f97e4e6d87dbe39e7d2c 100644
--- a/roles/kubernetes/secrets/templates/openssl.conf.j2
+++ b/roles/kubernetes/secrets/templates/openssl.conf.j2
@@ -16,7 +16,7 @@ DNS.5 = localhost
 DNS.{{ 5 + loop.index }} = {{ host }}
 {% endfor %}
 {% if loadbalancer_apiserver is defined  and apiserver_loadbalancer_domain_name is defined %}
-{% set idx =  groups['kube-master'] | length | int + 5 %}
+{% set idx =  groups['kube-master'] | length | int + 5 + 1 %}
 DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
 {% endif %}
 {% for host in groups['kube-master'] %}
diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml
index efe4616d25d8beb41fa23e86f177fc088e60bb46..5197aa0052b088c45ce2b5b5c6ca2358818e565e 100644
--- a/roles/network_plugin/calico/rr/tasks/main.yml
+++ b/roles/network_plugin/calico/rr/tasks/main.yml
@@ -35,11 +35,15 @@
     group: root
 
 - name: Calico-rr | Write calico-rr.env for systemd init file
-  template: src=calico-rr.env.j2 dest=/etc/calico/calico-rr.env
+  template:
+    src: calico-rr.env.j2
+    dest: /etc/calico/calico-rr.env
   notify: restart calico-rr
 
 - name: Calico-rr | Write calico-rr systemd init file
-  template: src=calico-rr.service.j2 dest=/etc/systemd/system/calico-rr.service
+  template:
+    src: calico-rr.service.j2
+    dest: /etc/systemd/system/calico-rr.service
   notify: restart calico-rr
 
 - name: Calico-rr | Configure route reflector
diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml
index 6d738bd37c08f8927edbf87504ba3bfa4db6908d..eefed471f2bae7c79e6030c27dbe82c7b888e0cd 100644
--- a/roles/network_plugin/calico/tasks/main.yml
+++ b/roles/network_plugin/calico/tasks/main.yml
@@ -60,7 +60,9 @@
   tags: [hyperkube, upgrade]
 
 - name: Calico | wait for etcd
-  uri: url=https://localhost:2379/health validate_certs=no
+  uri:
+    url: https://localhost:2379/health
+    validate_certs: no
   register: result
   until: result.status == 200 or result.status == 401
   retries: 10
@@ -160,17 +162,23 @@
   when: legacy_calicoctl
 
 - name: Calico (old) | Write calico-node systemd init file
-  template: src=calico-node.service.legacy.j2 dest=/etc/systemd/system/calico-node.service
+  template:
+    src: calico-node.service.legacy.j2
+    dest: /etc/systemd/system/calico-node.service
   when: legacy_calicoctl
   notify: restart calico-node
 
 - name: Calico | Write calico.env for systemd init file
-  template: src=calico.env.j2 dest=/etc/calico/calico.env
+  template:
+    src: calico.env.j2
+    dest: /etc/calico/calico.env
   when: not legacy_calicoctl
   notify: restart calico-node
 
 - name: Calico | Write calico-node systemd init file
-  template: src=calico-node.service.j2 dest=/etc/systemd/system/calico-node.service
+  template:
+    src: calico-node.service.j2
+    dest: /etc/systemd/system/calico-node.service
   when: not legacy_calicoctl
   notify: restart calico-node
 
diff --git a/roles/network_plugin/flannel/handlers/main.yml b/roles/network_plugin/flannel/handlers/main.yml
index 82810ac98e76d9982f483eddf2375a2542e9f01c..98c93a53a5e2d47fb3483d3f9aa2461f4559da35 100644
--- a/roles/network_plugin/flannel/handlers/main.yml
+++ b/roles/network_plugin/flannel/handlers/main.yml
@@ -28,7 +28,9 @@
     state: restarted
 
 - name: Flannel | pause while Docker restarts
-  pause: seconds=10 prompt="Waiting for docker restart"
+  pause:
+    seconds: 10
+    prompt: "Waiting for docker restart"
 
 - name: Flannel | wait for docker
   command: "{{ docker_bin_dir }}/docker images"
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 52cb193709b91b983455c6e095e473a1d7126fd1..5b17a094b2f1cb02b364413fa56a984d791f8543 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -1,7 +1,9 @@
 ---
 
 - name: reset | stop services
-  service: name={{ item }} state=stopped
+  service:
+    name: "{{ item }}"
+    state: stopped
   with_items:
     - kubelet
     - etcd
@@ -33,7 +35,9 @@
   shell: "{{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"
 
 - name: reset | restart docker if needed
-  service: name=docker state=restarted
+  service:
+    name: docker
+    state: restarted
   when: docker_dropins_removed.changed
 
 - name: reset | gather mounted kubelet dirs
@@ -46,7 +50,9 @@
   with_items: '{{ mounted_dirs.stdout_lines }}'
 
 - name: reset | delete some files and directories
-  file: path={{ item }} state=absent
+  file:
+    path: "{{ item }}"
+    state: absent
   with_items:
     - "{{kube_config_dir}}"
     - /var/lib/kubelet
diff --git a/roles/uploads/defaults/main.yml b/roles/uploads/defaults/main.yml
index af9572532ae7fc9bea6372020ab966bcaffb58de..2a23087a4012f691821d67525dab8e4477727be8 100644
--- a/roles/uploads/defaults/main.yml
+++ b/roles/uploads/defaults/main.yml
@@ -4,7 +4,7 @@ local_release_dir: /tmp
 # Versions
 etcd_version: v3.0.6
 calico_version: v0.23.0
-calico_cni_version: v1.4.2
+calico_cni_version: v1.5.6
 weave_version: v1.8.2
 
 # Download URL's
@@ -14,8 +14,8 @@ calico_cni_ipam_download_url: "https://github.com/projectcalico/calico-cni/relea
 weave_download_url: "https://github.com/weaveworks/weave/releases/download/{{weave_version}}/weave"
 
 # Checksums
-calico_cni_checksum: "9cab29764681e9d80da826e4b2cd10841cc01a749e0018867d96dd76a4691548"
-calico_cni_ipam_checksum: "09d076b15b791956efee91646e47fdfdcf382db16082cef4f542a9fff7bae172"
+calico_cni_checksum: "9a6bd6da267c498a1833117777c069f44f720d23226d8459bada2a0b41cb8258"
+calico_cni_ipam_checksum: "8d3574736df1ce10ea88fdec94d84dc58642081d3774d2d48249c6ee94ed316d"
 weave_checksum: "ee22e690985115a5986352b2c75589674349c618a5c95893f87600a13e2d58e9"
 etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"
 
diff --git a/roles/uploads/tasks/main.yml b/roles/uploads/tasks/main.yml
index 2d600059940f9649c4dd14748addc16c9022cf5f..a770020c288fa5b44dbb5b44f97092a61026ec85 100644
--- a/roles/uploads/tasks/main.yml
+++ b/roles/uploads/tasks/main.yml
@@ -1,6 +1,9 @@
 ---
 - name: Create dest directories
-  file: path={{local_release_dir}}/{{item.dest|dirname}} state=directory recurse=yes
+  file:
+    path: "{{ local_release_dir }}/{{ item.dest | dirname }}"
+    state: directory
+    recurse: yes
   with_items: '{{downloads}}'
 
 - name: Download items
diff --git a/roles/vault/tasks/bootstrap/main.yml b/roles/vault/tasks/bootstrap/main.yml
index edd2912d3a25207891eeff8fbf3d6a16c50abf59..98904bbe75e69192fe0039c634ad64eb6563b9a8 100644
--- a/roles/vault/tasks/bootstrap/main.yml
+++ b/roles/vault/tasks/bootstrap/main.yml
@@ -2,8 +2,10 @@
 
 - include: ../shared/check_vault.yml
   when: inventory_hostname in groups.vault
+
 - include: sync_secrets.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/find_leader.yml
   when: inventory_hostname in groups.vault and vault_cluster_is_initialized|d()
 
@@ -54,5 +56,6 @@
 
 - include: role_auth_cert.yml
   when: vault_role_auth_method == "cert"
+
 - include: role_auth_userpass.yml
   when: vault_role_auth_method == "userpass"
diff --git a/roles/vault/tasks/bootstrap/role_auth_cert.yml b/roles/vault/tasks/bootstrap/role_auth_cert.yml
index 7bbf58e860ca05989a32fb50556a940b7ac08e1d..d92cd9d69198a2234fb9465e1268955ac5fb89a0 100644
--- a/roles/vault/tasks/bootstrap/role_auth_cert.yml
+++ b/roles/vault/tasks/bootstrap/role_auth_cert.yml
@@ -21,5 +21,6 @@
     ca_name: auth-ca
     mount_name: auth-pki
   when: inventory_hostname == groups.vault|first and not vault_auth_ca_cert_needed
+
 - include: create_etcd_role.yml
   when: inventory_hostname in groups.etcd
diff --git a/roles/vault/tasks/bootstrap/role_auth_userpass.yml b/roles/vault/tasks/bootstrap/role_auth_userpass.yml
index ad09ab05b2b7a90a4cc3714f18403372cf6d97aa..2ad2fbc91084fba151445db2863d63a8d38c87ff 100644
--- a/roles/vault/tasks/bootstrap/role_auth_userpass.yml
+++ b/roles/vault/tasks/bootstrap/role_auth_userpass.yml
@@ -6,5 +6,6 @@
     auth_backend_path: userpass
     auth_backend_type: userpass
   when: inventory_hostname == groups.vault|first
+
 - include: create_etcd_role.yml
   when: inventory_hostname in groups.etcd
diff --git a/roles/vault/tasks/cluster/main.yml b/roles/vault/tasks/cluster/main.yml
index 5dab550aac0a739b38d470be842e0eb6d53cc8a4..db97dd0781b53fdb2eb018c20e8b6cd4e84ab587 100644
--- a/roles/vault/tasks/cluster/main.yml
+++ b/roles/vault/tasks/cluster/main.yml
@@ -2,6 +2,7 @@
 
 - include: ../shared/check_vault.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/check_etcd.yml
   when: inventory_hostname in groups.vault
 
@@ -9,18 +10,25 @@
 
 - include: configure.yml
   when: inventory_hostname in groups.vault
+
 - include: binary.yml
   when: inventory_hostname in groups.vault and vault_deployment_type == "host"
+
 - include: systemd.yml
   when: inventory_hostname in groups.vault
+
 - include: init.yml
   when: inventory_hostname in groups.vault
+
 - include: unseal.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/find_leader.yml
   when: inventory_hostname in groups.vault
+
 - include: ../shared/pki_mount.yml 
   when: inventory_hostname == groups.vault|first
+
 - include: ../shared/config_ca.yml
   vars:
     ca_name: ca
@@ -31,5 +39,6 @@
 
 - include: role_auth_cert.yml
   when: vault_role_auth_method == "cert"
+
 - include: role_auth_userpass.yml
   when: vault_role_auth_method == "userpass"